Source

kinect-viewer / main.vs

Cliff Biffle 46f9829 




Cliff Biffle f167c68 
Cliff Biffle 9c3d015 
Cliff Biffle e529285 
Cliff Biffle afbe1ef 
Cliff Biffle 46f9829 

Cliff Biffle 82cb6cd 
























Cliff Biffle b10ad2c 
Cliff Biffle 82cb6cd 


Cliff Biffle b10ad2c 


Cliff Biffle 82cb6cd 
Cliff Biffle 46f9829 


Cliff Biffle 82cb6cd 
Cliff Biffle 46f9829 


Cliff Biffle b10ad2c 













Cliff Biffle 46f9829 



Cliff Biffle b10ad2c 
Cliff Biffle 46f9829 
/*
 * Copyright 2010 Cliff L. Biffle.  All Rights Reserved.
 * Use of this source code is governed by the Apache License 2.0,
 * which can be found in the LICENSE file.
 */
 
// 640x480 16-bit luminance texture holding the Kinect's raw depth frame.
uniform sampler2D depthTex;
// RGB frame from the Kinect's color camera.
// NOTE(review): not referenced in this shader — presumably consumed by the
// matching fragment shader; confirm before removing.
uniform sampler2D colorTex;
// Toggles between camera-derived texturing and some other coloring mode.
// NOTE(review): also unused here — likely read by the fragment stage.
uniform bool useTextureFromCamera;

/*
 * Kinect Projection Correction
 *
 * Since the Kinect is, fundamentally, a camera, its data has already
 * undergone perspective projection.  Closer objects seem relatively
 * larger, and distant objects seem small.  If we left the POV fixed,
 * we could simply display the Kinect's raw data using an orthographic
 * projection -- but what's the fun in that?
 *
 * The Kinect's depth output looks a lot like a 16-bit grayscale image.
 * Usefully, the depth samples are a function of 1/Z, and we need 1/Z
 * during the perspective un-projection.
 *
 * The CPU provides the Kinect's depth data in a 16-bit luminance
 * texture, 640x480 pixels in size.  To help us perform the texture
 * lookup here, it also provides a canned array of 640x480 vertices,
 * each of which has...
 *  0 ≤ X, Y ≤ 1.0 - each vertex in the XY plane maps to a single pixel,
 *     in texel coordinates.
 *  Z is garbage.
 *
 * Finally, the CPU uses the texture unit's matrix to pass the
 * un-projection matrix.
 */
/*
 * Un-projects one Kinect sample back into camera space.
 *
 *   tex   - the 640x480 depth texture (16-bit luminance).
 *   point - vertex whose .xy are the texel coordinates to sample.
 *
 * Returns the homogeneous camera-space position; divide by .w before
 * doing Euclidean math on the result.
 *
 * Identifiers renamed from the original: "sample" is a reserved keyword
 * in GLSL >= 4.20, and a parameter named "texture" shadows the built-in
 * texture() function in GLSL >= 1.30.  Behavior is unchanged.
 */
vec4 kinect_unproject(sampler2D tex, vec4 point) {
  // Luminance textures replicate the value across r/g/b, so .g carries
  // the raw depth reading (proportional to 1/Z, per the comment above).
  vec4 depthSample = texture2D(tex, point.xy);
  vec4 kinectPoint = vec4(point.xy, depthSample.g, 1.);
  // The CPU smuggles the un-projection matrix in via texture unit 0.
  return gl_TextureMatrix[0] * kinectPoint;
}

// Distance between adjacent texels of the 640x480 depth texture,
// expressed in the normalized [0,1] coordinates used by gl_Vertex.xy.
const float pixelDistanceX = 1. / 640.;
const float pixelDistanceY = 1. / 480.;

/*
 * Vertex Shader entry point.
 */
void main() {
  // Recover this vertex's camera-space position from the depth texture.
  vec4 here = kinect_unproject(depthTex, gl_Vertex);

  // Re-project it using the application's current perspective setup.
  gl_Position = gl_ModelViewProjectionMatrix * here;

  // Sample the texel to the right and the texel below to estimate the
  // local surface orientation.  (At the right/bottom edge this reads
  // one texel past 1.0; behavior there depends on the texture's wrap
  // mode -- set on the CPU side.)
  vec4 neighborRight = kinect_unproject(depthTex, gl_Vertex + vec4(pixelDistanceX, 0., 0., 0.));
  vec4 neighborDown  = kinect_unproject(depthTex, gl_Vertex + vec4(0., pixelDistanceY, 0., 0.));

  // Dehomogenize before taking differences; the cross product of the
  // down- and right-pointing edge vectors gives an (unnormalized)
  // surface normal in Kinect-camera space.
  vec3 p  = here.xyz / here.w;
  vec3 pr = neighborRight.xyz / neighborRight.w;
  vec3 pd = neighborDown.xyz / neighborDown.w;
  vec3 surfaceNormal = cross(pd - p, pr - p);

  // A non-positive Z reads as "no depth data here": emit a fully
  // transparent black vertex instead of lighting garbage.
  gl_FrontColor = (here.z <= 0.)
      ? vec4(0., 0., 0., 0.)
      : light_vertex(surfaceNormal);
}