Commits

schlangen committed a63dcde Draft

Added the hidecam project: use the Kinect as a webcam and modify the stream with custom video filters.

  • Participants
  • Parent commits dbc72f9

Comments (0)

Files changed (6)

+# Build the hidecam tools: oni2webcam (Kinect -> fake webcam with video
+# filters, via OpenNI/NITE) and guitest.  Requires OpenCV, OpenNI and NITE.
+CC = g++
+LINK = g++
+INSTALL = install
+CFLAGS = `pkg-config --cflags opencv` -I../include -I.  -g -I/usr/include/ni -I/usr/X11/include
+LFLAGS = `pkg-config --libs opencv` -lOpenNI -lXnVNite_1_5_2  -lstdc++
+
+all         = oni2webcam guitest
+
+# Default rule.  The `all` variable alone is not a rule: without this line
+# `make` built only the first target (oni2webcam) and guitest was skipped.
+all: $(all)
+
+oni2webcam : push2vloop.o oni2webcam.o 
+	${CC} ${CFLAGS} push2vloop.o oni2webcam.o  ${LFLAGS} -o oni2webcam
+
+oni2webcam.o : oni2webcam.cpp 
+	${CC} ${CFLAGS} -c oni2webcam.cpp
+
+push2vloop.o : push2vloop.cpp push2vloop.h
+	${CC} ${CFLAGS} -c push2vloop.cpp
+
+guitest : guitest.o 
+	${CC} ${CFLAGS} guitest.o ${LFLAGS} -o guitest
+
+guitest.o : guitest.cpp
+	${CC} ${CFLAGS} -c guitest.cpp

hidecam/backgrounds/bg1.png

Added
New image

hidecam/oni2webcam.cpp

+#include <cstdio>
+#include <csignal>
+#include <iostream>
+#include <fstream>
+#include <pthread.h>
+
+#include "push2vloop.h"
+
+#define CV_NO_BACKWARD_COMPATIBILITY
+
+#include <XnCppWrapper.h>
+
+#include "cv.h"
+#include "highgui.h"
+
+
+/***************************************************************/
+/************************* GUI *********************************/
+/***************************************************************/
+
+
+using std::vector;
+
+// GUI state: a parameter table with one row per image region and one
+// column per effect, edited through HighGUI trackbars.
+int num_effects = 5;   // effects selectable per region
+int num_regions = 2;   // image regions (head and background)
+
+// args[region][effect] holds the current parameter value for that effect.
+vector<vector<int> > args;
+
+int curPart = 0;       // region currently selected in the GUI
+int curEffect = 0;     // effect currently selected in the GUI
+int max_param = 100;   // upper bound of the parameter slider
+
+// Trackbar / window labels.  String literals are const data; binding them
+// to plain char* is a deprecated conversion that modern compilers reject.
+const char* tb1 = "image region";
+const char* tb2 = "effect name";
+const char* tb3 = "effect param";
+const char* window_name = "hideCam";
+
+// Trackbar callback: store the slider position as the parameter value of
+// the currently selected region/effect pair.
+void trackbar3(int state){
+    args[curPart][curEffect] = state;
+}
+
+// Trackbar callback: select the active effect and re-create the parameter
+// slider so it shows the stored value for the new selection.
+void trackbar2(int state){
+    curEffect = state;
+    // HighGUI keeps the value pointer passed to cvCreateTrackbar and writes
+    // through it whenever the slider moves; the original stack local dangled
+    // as soon as this callback returned, so the storage must be static.
+    static int value;
+    value = args[curPart][curEffect];
+    cvCreateTrackbar( tb3, window_name, &value, max_param,  trackbar3);
+}
+
+// Trackbar callback: select the active image region and re-create the
+// parameter slider so it shows the stored value for the new selection.
+void trackbar1(int state){
+    curPart = state;
+    // HighGUI retains the value pointer; a stack local (as in the original)
+    // dangles after return, so give the slider static storage instead.
+    static int value;
+    value = args[curPart][curEffect];
+    cvCreateTrackbar( tb3, window_name, &value, max_param,  trackbar3);
+}
+
+
+// Create the control window and its three trackbars (region selector,
+// effect selector, parameter slider) and zero the parameter table.
+void init_gui(){
+    // args[region][effect] = 0 for every entry.
+    args.assign(num_regions, vector<int>(num_effects, 0));
+
+    // CV_WINDOW_NORMAL is 0, so the OR is effectively CV_WINDOW_AUTOSIZE.
+    cvNamedWindow(window_name,CV_WINDOW_NORMAL|CV_WINDOW_AUTOSIZE);
+
+    // HighGUI stores these value pointers and writes through them whenever a
+    // slider moves.  The original passed the address of a single stack local
+    // (shared by all three sliders), which dangled once init_gui returned;
+    // give each trackbar its own static storage instead.
+    static int region_val = 0, effect_val = 0, param_val = 0;
+    cvCreateTrackbar( tb1, window_name, &region_val, num_regions-1, trackbar1);
+    cvCreateTrackbar( tb2, window_name, &effect_val, num_effects-1,  trackbar2);
+    cvCreateTrackbar( tb3, window_name, &param_val, max_param,  trackbar3);
+}
+
+
+/***************************************************************/
+/******************** END  GUI *********************************/
+/***************************************************************/
+
+
+
+
+// Replacement background image, drawn over non-user pixels by effect 1 in
+// draw_image.  NOTE(review): loaded at static-init time relative to the
+// working directory; bg1 is NULL if the file is missing and is later
+// dereferenced unchecked in draw_image — confirm.
+IplImage* bg1=cvLoadImage("backgrounds/bg1.png");
+
+
+
+
+
+
+// Most recently fetched joint position (x, y, z) and 3x3 orientation matrix.
+float jointCoords[3];
+float jointOrients[9];
+
+// caches all joint coords
+float allJointCoords[24][3];
+
+float posConfidence;
+float orientConfidence;
+
+//Multipliers for coordinate system. This is useful if you use
+//software like animata, that needs OSC messages to use an arbitrary
+//coordinate system.
+double mult_x = 1;
+double mult_y = 1;
+double mult_z = 1;
+
+//Offsets for coordinate system. This is useful if you use software
+//like animata, that needs OSC messages to use an arbitrary coordinate
+//system.
+double off_x = 0.0;
+double off_y = 0.0;
+double off_z = 0.0;
+
+
+
+
+// Runtime option flags; only mirrorMode is enabled here, the rest keep the
+// defaults inherited from the OSC-sender code this file derives from.
+bool kitchenMode = false;
+
+bool mirrorMode = true;
+bool play = false;
+bool record = false;
+bool sendRot = false;
+bool filter = false;
+bool preview = false;
+bool raw = false;
+bool filterLowConfidence = false;
+bool realworld = false;
+bool debugFacts = false;
+bool debugCSV = false;
+bool sendOrient = false;
+int nDimensions = 3;
+
+
+
+// OpenNI production nodes and metadata buffers shared across this file.
+xn::Context context;
+xn::DepthGenerator depth;
+xn::DepthMetaData depthMD;
+xn::ImageGenerator image;
+xn::ImageMetaData imageMD;
+xn::UserGenerator userGenerator;
+xn::GestureGenerator gestureGenerator;
+xn::SceneAnalyzer scene;
+xn::SceneMetaData sceneMD;
+
+// Name of the calibration pose reported by the skeleton capability.
+XnChar g_strPose[20] = "";
+
+
+
+void visualize_current_data();
+
+
+//gesture callbacks
+// OpenNI callback: a tracked gesture completed.  The gesture is removed so
+// it is only reported once.
+void XN_CALLBACK_TYPE Gesture_Recognized(xn::GestureGenerator& generator, const XnChar* strGesture, const XnPoint3D* pIDPosition, const XnPoint3D* pEndPosition, void* pCookie) {
+    printf("Gesture recognized: %s\n", strGesture);
+    gestureGenerator.RemoveGesture(strGesture);
+}
+
+// OpenNI callback: gesture recognition in progress (log only).
+void XN_CALLBACK_TYPE Gesture_Process(xn::GestureGenerator& generator, const XnChar* strGesture, const XnPoint3D* pPosition, XnFloat fProgress, void* pCookie) {
+  std::cout <<  "gesture process" <<std::endl;
+}
+
+
+
+// Callback: New user was detected
+// Callback: New user was detected
+// Requests calibration immediately (no calibration pose required —
+// presumably relying on NITE 1.5's pose-less calibration; confirm).
+void XN_CALLBACK_TYPE new_user(xn::UserGenerator& generator, XnUserID nId, void* pCookie) {
+	printf("New User %d\n", nId);
+	std::cout <<  std::endl;
+	userGenerator.GetSkeletonCap().RequestCalibration(nId, TRUE);
+
+}
+
+
+
+// Callback: An existing user was lost
+// Callback: An existing user was lost
+// Log only; tracking state for the id is dropped by OpenNI itself.
+void XN_CALLBACK_TYPE lost_user(xn::UserGenerator& generator, XnUserID nId, void* pCookie) {
+	printf("Lost user %d\n", nId);
+	std::cout <<  std::endl;
+
+}
+
+
+
+// Callback: Detected a pose
+// Callback: Detected a pose
+// Stops further pose detection for the user and starts calibration.
+void XN_CALLBACK_TYPE pose_detected(xn::PoseDetectionCapability& capability, const XnChar* strPose, XnUserID nId, void* pCookie) {
+	printf("Pose %s detected for user %d\n", strPose, nId);
+	std::cout <<  std::endl;
+	userGenerator.GetPoseDetectionCap().StopPoseDetection(nId);
+	userGenerator.GetSkeletonCap().RequestCalibration(nId, TRUE);
+}
+
+
+
+// Callback: Started calibration
+// Callback: Started calibration (log only).
+void XN_CALLBACK_TYPE calibration_started(xn::SkeletonCapability& capability, XnUserID nId, void* pCookie) {
+	printf("Calibration started for user %d\n", nId);
+	std::cout <<  std::endl;
+}
+
+
+
+// Callback: Finished calibration
+// Callback: Finished calibration
+// On success start skeleton tracking; on failure retry calibration
+// immediately (may loop indefinitely for an uncalibratable user).
+void XN_CALLBACK_TYPE calibration_ended(xn::SkeletonCapability& capability, XnUserID nId, XnBool bSuccess, void* pCookie) {
+	if (bSuccess) {
+		printf("Calibration complete, start tracking user %d\n", nId);
+		std::cout <<  std::endl;
+		userGenerator.GetSkeletonCap().StartTracking(nId);
+
+	}
+	else {
+		printf("Calibration failed for user %d\n", nId);
+		userGenerator.GetSkeletonCap().RequestCalibration(nId, TRUE);
+	}
+}
+
+// Fetch the transform of one skeleton joint for `player` and publish it into
+// the globals jointCoords / jointOrients / allJointCoords.  Coordinates are
+// normalized unless `raw` is set.  Always returns 0 (also when the sample is
+// skipped for low confidence).
+int jointPos(XnUserID player, XnSkeletonJoint eJoint) {
+    //  printf("jointpos for  user %d\n", player);
+
+	XnSkeletonJointTransformation jointTrans;
+
+	userGenerator.GetSkeletonCap().GetSkeletonJoint(player, eJoint, jointTrans);
+
+	posConfidence = jointTrans.position.fConfidence;
+
+	//userID = player;
+
+	// Optionally drop samples the tracker is unsure about.
+	if (filterLowConfidence && posConfidence < 0.5) {
+		return 0;
+	}
+
+	// NOTE(review): the `realworld` branch is only reachable when `raw` is
+	// also true (it sits on the `else` of `!raw`) — confirm this is intended.
+	if (!raw)
+	{
+	  jointCoords[0] = off_x + (mult_x * (1280 - jointTrans.position.position.X) / 2560); //Normalize coords to 0..1 interval
+	  jointCoords[1] = off_y + (mult_y * (960 - jointTrans.position.position.Y) / 1920); //Normalize coords to 0..1 interval
+	  jointCoords[2] = off_z + (mult_z * jointTrans.position.position.Z * 7.8125 / 10000); //Normalize coords to 0..7.8125 interval
+	}
+	else if (realworld)
+	{
+		XnPoint3D realwordPoint;
+		realwordPoint.X = 0; realwordPoint.Y = 0; realwordPoint.Z = 0;
+		depth.ConvertProjectiveToRealWorld(1, &jointTrans.position.position, &realwordPoint); 
+	  
+	    jointCoords[0] = realwordPoint.X;
+	    jointCoords[1] = realwordPoint.Y;
+	    jointCoords[2] = realwordPoint.Z;
+	}
+	else
+	{
+	  
+	  // Raw pass-through of the tracker's coordinates.
+	  jointCoords[0] = jointTrans.position.position.X;
+	  jointCoords[1] = jointTrans.position.position.Y;
+	  jointCoords[2] = jointTrans.position.position.Z;
+	}
+
+	if (sendOrient)
+	{
+	  orientConfidence = jointTrans.orientation.fConfidence;
+
+	  // Copy the 3x3 rotation matrix (row-major, 9 elements).
+	  for (int i=0; i<9; i++)
+	  {
+	    jointOrients[i] = jointTrans.orientation.orientation.elements[i];
+	  }
+	}
+	
+	// XnSkeletonJoint enumerators start at 1, the cache is 0-based.
+	allJointCoords[eJoint-1][0] = jointCoords[0];
+	allJointCoords[eJoint-1][1] = jointCoords[1];
+	allJointCoords[eJoint-1][2] = jointCoords[2];
+	
+	//visualize_current_data();
+
+	return 0;
+}
+
+
+// Refresh the joint cache for every currently tracked user by querying all
+// 24 skeleton joints (enumerators 1..24) through jointPos.
+void get_body_positions(){
+
+	XnUserID aUsers[15];
+	XnUInt16 nUsers = 15;  // in: capacity, out: actual number of users
+	userGenerator.GetUsers(aUsers, nUsers);
+	for (int i = 0; i < nUsers; ++i) {
+		if (userGenerator.GetSkeletonCap().IsTracking(aUsers[i])) {
+		for(int j=1;j<25;j++){
+		  jointPos(aUsers[i], (XnSkeletonJoint) j);
+		}
+		
+
+		}
+		else {
+			//Send user's center of mass
+			//sendUserPosMsg(aUsers[i]);
+		}
+	}
+}
+
+
+// Abort the program with a user-friendly hint whenever an OpenNI call did
+// not return XN_STATUS_OK.
+void checkRetVal(XnStatus nRetVal) {
+	if (nRetVal == XN_STATUS_OK)
+		return;
+	printf("There was a problem initializing kinect... Make sure you have \
+connected both usb and power cables and that the driver and OpenNI framework \
+are correctly installed.\n\n");
+	exit(1);
+}
+
+
+
+
+// Working frame buffer (640x480, 8-bit, 3 channels) reused every iteration.
+IplImage *img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
+
+// Shutdown path, also installed as the SIGINT/SIGTERM handler: release the
+// OpenNI context and OpenCV resources, then exit.
+// NOTE(review): printf/cvDestroyWindow are not async-signal-safe; tolerated
+// here because the process exits immediately — confirm.
+void terminate(int ignored) {
+	context.Shutdown();
+	cvReleaseImage(&img);
+	cvDestroyWindow(window_name);
+	std::cout << "bye" << std::endl;
+	exit(0);
+}
+
+
+
+
+// One frame step: wait for new Kinect data, refresh the depth/image/scene
+// metadata snapshots, and update the cached joint positions.
+void main_loop() {
+	// Read next available data
+	context.WaitAnyUpdateAll();
+	// Process the data
+	depth.GetMetaData(depthMD);
+	image.GetMetaData(imageMD);
+	scene.GetMetaData(sceneMD);
+    
+	get_body_positions();
+	//visualize_data();
+	//cvShowImage("image", img);
+	
+}
+
+
+
+
+// Debug helper: draw the most recent joint position (scaled into pixel
+// space) as a red circle on img and log the coordinates.
+void visualize_current_data(){
+
+    /* draw a red circle */
+    cvCircle(img,                       /* the dest image */
+             cvPoint(jointCoords[0]*500, jointCoords[1]*500), 20,      /* center point and radius */
+             cvScalar(0, 0, 255, 0),    /* the color; red */
+             3, 8, 0);       
+    // Separate the two values — the original streamed them back to back,
+    // fusing x and y into one unreadable number.
+    std::cout << "visualize: " << (jointCoords[0]*500 ) << " " << (jointCoords[1]*500) << std::endl;
+ 
+}
+
+
+
+
+
+//XnRGB24Pixel* pRgbPixel;
+
+//http://stackoverflow.com/questions/8364558/openni-rgb-image-to-opencv-bgr-iplimage-conversion
+//http://stackoverflow.com/questions/8364558/openni-rgb-image-to-opencv-bgr-iplimage-conversion
+// Compose the output frame: take the Kinect RGB image, replace/blur the
+// background pixels (scene label 0) according to args[0][*], and optionally
+// draw a circle over the tracked head (args[1][0] = radius).
+void draw_image(){
+  
+    //pRGbPixel = image.GetRGB24ImageMap();
+    
+   XnUInt8 *pImage = imageMD.WritableData(); 
+   
+   const XnLabel *pLabels = sceneMD.Data();
+   const XnDepthPixel *pDepths = depthMD.Data();
+   
+  /*  XnUInt8 * pImage = new XnUInt8 [640*480*3]; 
+    memcpy(pImage,imageMD.Data(),640*480*3*sizeof(XnUInt8));
+    XnUInt8 temp;
+    for(size_t row=0; row<480; row++){
+        for(size_t col=0;col<3*640; col+=3){
+            size_t index = row*3*640+col;
+            temp = pImage[index];
+            pImage[index] = pImage[index+2];
+            pImage[index+2] = temp;
+        }
+    }*/
+    // Point img directly at OpenNI's buffer (no copy).
+    // NOTE(review): this orphans the pixel block allocated by cvCreateImage
+    // and makes cvReleaseImage in terminate() act on OpenNI's memory — confirm.
+    img->imageData = (char*) pImage;
+    
+    cvCvtColor( img, img, CV_BGR2RGB);
+    
+
+    
+    // pLabels has one label per pixel; rgb_index steps 3 bytes per pixel
+    // while scene_index steps 1, so the two stay in sync.
+    int scene_index = 0;
+    for(size_t row=0; row<480; row++){
+        for(size_t col=0;col<3*640; col+=3){
+            size_t rgb_index = row*3*640+col;
+
+            // Label 0 = no user: this is a background pixel.
+            if (pLabels[scene_index++] == 0){
+                // Effect args[0][0]: background fill (0 = black, 1 = bg1 image).
+                switch (args[0][0]){
+                    case 0:
+                        img->imageData[rgb_index] = 0;//pDepths[scene_index] % 255;
+                        img->imageData[rgb_index+1] = 0;//pDepths[scene_index] % 255;
+                        img->imageData[rgb_index+2] = 0;//pDepths[scene_index] % 255;
+                        break;
+                    case 1:
+                        img->imageData[rgb_index] = bg1->imageData[rgb_index];//pDepths[scene_index] % 255;
+                        img->imageData[rgb_index+1] = bg1->imageData[rgb_index+1];//pDepths[scene_index] % 255;
+                        img->imageData[rgb_index+2] = bg1->imageData[rgb_index+2];//pDepths[scene_index] % 255;
+                        break;
+                }
+            
+                // Effect args[0][1]: block-average "pixelate" with block size bs.
+                // NOTE(review): `(col-(col%bs + x)*3)` parenthesizes differently
+                // from the analogous expression in the next effect — looks like
+                // it should be `(col - col%bs*3 + x*3)`; confirm.
+                // NOTE(review): imageData is signed char, so the r/g/b sums can
+                // accumulate negative values for bright pixels — confirm.
+                if (args[0][1] > 1){
+                        int r=0, g=0,b=0,count=0;
+                            int bs = args[0][1];
+                            for(int x = 0; x < bs; x++){
+                                for (int y = 0;y<bs;y++){
+                                    size_t extra_rgb_index = (row - row%bs + y)*3*640+(col-(col%bs + x)*3);
+                                    r += img->imageData[extra_rgb_index];
+                                    g += img->imageData[extra_rgb_index+1];
+                                    b += img->imageData[extra_rgb_index+2];
+                                    count++;
+                                }
+                            }
+                        if (count > 0){
+                            img->imageData[rgb_index] = (int) (r / count);
+                            img->imageData[rgb_index+1] = (int) (g / count);
+                            img->imageData[rgb_index+2] = (int) (b / count);
+                        }
+                    }
+                // Effect args[0][2]: box blur of radius bs/2 around the pixel.
+                // NOTE(review): extra_rgb_index is size_t, so `>= 0` is always
+                // true and a negative offset wraps to a huge value, defeating
+                // the bounds check near the image edges — confirm.
+                if (args[0][2] > 1){
+                            int r=0, g=0,b=0,count=0;
+                            int bs = args[0][2];
+                            for(int x = 0; x < bs; x++){
+                                for (int y = 0;y<bs;y++){
+                                    size_t extra_rgb_index = (row - int(bs/2) + y)*3*640+(col-(int(bs/2) + x)*3);
+                                    if (extra_rgb_index >= 0 && extra_rgb_index < 480*640*3){
+                                        r += img->imageData[extra_rgb_index];
+                                        g += img->imageData[extra_rgb_index+1];
+                                        b += img->imageData[extra_rgb_index+2];
+                                        count++;
+                                    }
+                                }
+                            }
+                        if (count > 0){
+                            img->imageData[rgb_index] = (int) (r / count);
+                            img->imageData[rgb_index+1] = (int) (g / count);
+                            img->imageData[rgb_index+2] = (int) (b / count);
+                        }
+                    }
+          }
+      }
+    }
+    
+    // TODO: hier weitermachen! (German: "continue here!")
+    
+    XnUInt16 nUsers = userGenerator.GetNumberOfUsers();
+    XnUserID aUsers[nUsers];
+	userGenerator.GetUsers(aUsers, nUsers);
+    
+    // Head overlay for the first detected user only.
+    if ( nUsers > 0){
+        //printf("b0\n");
+        int b1 = userGenerator.GetSkeletonCap().IsCalibrated(aUsers[0]);
+        //printf("b1 %d\n", b1);
+        int b2 = userGenerator.GetSkeletonCap().IsTracking(aUsers[0]) ;
+        //printf("b2 %d\n", b2);
+        int b3 = userGenerator.GetSkeletonCap().IsJointAvailable(XN_SKEL_HEAD);
+        //printf("b3 %d\n", b3);
+        //printf("got user");   
+        
+        
+        
+        /*    XnSkeletonJointTransformation jointTrans;
+            
+            userGenerator.GetSkeletonCap().GetSkeletonJointPosition( jointTrans);
+            
+            posConfidence = jointTrans.position.fConfidence;
+            printf("confidence: %f -> ", posConfidence);
+            int isconf = posConfidence==1.0f;
+            printf("%d \n", isconf);
+            if (isconf && b1 && b2 && b3){
+                printf("u\n");
+                XnPoint3D headCenter[2];
+                headCenter[0] = jointTrans.position.position;
+                printf("a %f %f %f \n", headCenter[0].X, headCenter[0].Y, headCenter[0].Z);    
+            
+                printf("x\n");    
+                depth.ConvertRealWorldToProjective(1, headCenter, headCenter);
+                printf("b\n");    
+                
+                 cvCircle(img,                       // the dest image 
+                         cvPoint(headProjective[0].X, headProjective[0].Y), 30,      // center point and radius 
+                         cvScalar(0, 0, 255, 0),    // the color; red 
+                         3, 8, 0);       
+            printf("d\n");    
+        }*/
+        
+        // Project the head joint into image coordinates and draw the circle
+        // with the radius taken from the GUI (args[1][0], 0 = disabled).
+        XnSkeletonJointPosition headJoint;
+        userGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[0], XN_SKEL_HEAD,headJoint);
+
+        XnPoint3D pt[1];
+        pt[0] = headJoint.position;
+
+        depth.ConvertRealWorldToProjective(1, pt, pt);    
+        
+        if (args[1][0] != 0){
+            cvCircle(img,                       // the dest image 
+                             cvPoint(pt[0].X, pt[0].Y), args[1][0],      // center point and radius 
+                             cvScalar(0, 0, 255, 0),    // the color; red 
+                             3, 8, 0);   
+              }
+    }
+    /*
+    XN_SKEL_HEAD 	
+XN_SKEL_NECK 	
+XN_SKEL_TORSO 
+XN_LEFT_SHOULDER*/
+    
+    
+    
+}
+
+
+
+
+// Entry point: initialize OpenNI (depth/image/scene/user generators), the
+// v4l2 loopback output, signal handlers and the GUI, then run the
+// capture -> filter -> display -> push loop until a key is pressed.
+int main(int argc, char **argv) {
+    
+    if(argc<2)
+    {
+        printf("Usage: kinect_loopback <video_device>\n");
+        return 0;
+    }
+    // NOTE(review): argv[1] is printed but never used — the output device is
+    // hard-coded in push2vloop.cpp (VIDEO_DEVICE) — confirm.
+    printf("RGB Fake video device %s\n",argv[1]);
+    
+	printf("Initializing...\n");
+	unsigned int arg = 1,
+				 require_argument = 0,
+				 port_argument = 0;
+	XnMapOutputMode mapMode;
+	XnStatus nRetVal = XN_STATUS_OK;
+	// NOTE(review): hGestureCallbacks is declared but gestureGenerator is
+	// never created/registered, so the gesture callbacks above are unused.
+	XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks, hGestureCallbacks;
+	xn::Recorder recorder;
+
+	context.Init();
+
+	
+
+	depth.Create(context);
+	image.Create(context);
+    scene.Create(context);
+	
+	// Open and configure the v4l2 loopback device (see push2vloop.cpp).
+	init_vloop();
+
+    // All generators run at VGA resolution, 30 fps.
+    mapMode.nXRes = XN_VGA_X_RES;
+    mapMode.nYRes = XN_VGA_Y_RES;
+    mapMode.nFPS = 30;
+    depth.SetMapOutputMode(mapMode);
+    image.SetMapOutputMode(mapMode);
+    scene.SetMapOutputMode(mapMode);
+
+    // Align the depth map to the RGB camera viewpoint so scene labels match
+    // image pixels.
+    depth.GetAlternativeViewPointCap().SetViewPoint(image); 
+
+		nRetVal = context.FindExistingNode(XN_NODE_TYPE_USER, userGenerator);
+		if (nRetVal != XN_STATUS_OK)
+			nRetVal = userGenerator.Create(context);
+
+		checkRetVal(userGenerator.RegisterUserCallbacks(new_user, lost_user, NULL, hUserCallbacks));
+		checkRetVal(userGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(calibration_started, calibration_ended, NULL, hCalibrationCallbacks));
+		checkRetVal(userGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(pose_detected, NULL, NULL, hPoseCallbacks));
+		checkRetVal(userGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose));
+		checkRetVal(userGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL));
+		if (filter)
+			userGenerator.GetSkeletonCap().SetSmoothing(0.8);
+	
+
+	xnSetMirror(depth, mirrorMode);
+	xnSetMirror(image, mirrorMode);
+    xnSetMirror(scene, mirrorMode);
+
+	
+	signal(SIGTERM, terminate);
+	signal(SIGINT, terminate);
+	
+	
+	// NOTE(review): SIGINT is registered twice (signal above, sigaction
+	// here); the sigaction registration wins.  memset/sigaction rely on
+	// transitively included headers (<cstring>, <csignal>) — confirm.
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = &terminate;
+	sigaction(SIGINT, &sa,NULL);
+
+	
+
+	
+	printf("Initialized Kinect, looking for users...\n\n");
+	context.StartGeneratingAll();
+
+	init_gui();
+
+	
+	while(true){
+		// Clear the frame, grab new Kinect data, apply the filters.
+		cvSet(img, CV_RGB(0, 0, 0));
+		
+		
+		main_loop();
+		
+		draw_image();
+		
+
+		// Show the (RGB) frame, then convert for the loopback device and
+		// push it out as the fake webcam stream.
+		cvShowImage(window_name, img);
+        cvCvtColor(img,img,CV_RGB2BGR);
+        push2vloop(img);
+		int k = cvWaitKey(1);
+		
+		// Any key quits.
+		if (k != -1){// 27){
+		  std::cout<<"got key: " << k <<std::endl;
+		  terminate(0);
+		}
+
+	}
+	
+
+	// Unreachable: the loop only exits via terminate().
+	terminate(0);
+}

hidecam/push2vloop.cpp

+
+
+#include <sys/ioctl.h>
+#include <linux/videodev2.h>
+#include <fcntl.h>
+
+#include "push2vloop.h"
+
+
+# define VIDEO_DEVICE "/dev/video2"
+# define FRAME_FORMAT V4L2_PIX_FMT_RGB24
+# define FRAME_WIDTH  640
+# define FRAME_HEIGHT 480
+
+struct v4l2_capability vid_caps;
+struct v4l2_format vid_format;
+const char*video_device=VIDEO_DEVICE;
+int fdwr;
+size_t framesize = FRAME_WIDTH * FRAME_HEIGHT * 3;
+size_t linewidth = FRAME_WIDTH * 3;
+
+
+
+// Open the v4l2 loopback output device (fdwr, module-level) and configure
+// it to accept packed 640x480 RGB24 frames.  Aborts via assert if the
+// device cannot be opened.
+void init_vloop(){
+    printf("init video...\n");
+
+	// The original allocated two framesize scratch buffers here that were
+	// never used and never freed — a plain memory leak; they are gone.
+
+	fdwr = open(video_device, O_RDWR);
+	printf("fdwr: %i\n",fdwr);
+	assert(fdwr >= 0);
+
+	// Capability query; the result is deliberately ignored (some loopback
+	// driver versions fail this ioctl even though output works).
+	int ret_code = ioctl(fdwr, VIDIOC_QUERYCAP, &vid_caps);
+	//assert(ret_code != -1);
+
+	memset(&vid_format, 0, sizeof(vid_format));
+
+	vid_format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	vid_format.fmt.pix.width = FRAME_WIDTH;
+	vid_format.fmt.pix.height = FRAME_HEIGHT;
+	vid_format.fmt.pix.pixelformat = FRAME_FORMAT;
+	vid_format.fmt.pix.sizeimage = framesize;
+	vid_format.fmt.pix.field = V4L2_FIELD_NONE;
+	vid_format.fmt.pix.bytesperline = linewidth;
+	vid_format.fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+	ret_code = ioctl(fdwr, VIDIOC_S_FMT, &vid_format);
+	(void) ret_code;
+    printf("video init done!\n");
+}
+
+
+// Push one 640x480 RGB24 frame into the v4l2 loopback device.
+// Assumes img->imageData is a tightly packed FRAME_WIDTH*FRAME_HEIGHT*3
+// buffer (no row padding), which holds for the 640x480 images used here.
+void push2vloop(IplImage *img){
+        //lock mutex for depth image
+             //   pthread_mutex_lock( &mutex_depth );
+
+                // The original ignored write()'s result; a failed or short
+                // write would silently freeze the fake webcam, so report it.
+                ssize_t written = write(fdwr, img->imageData, framesize);
+                if (written != (ssize_t) framesize)
+                    perror("push2vloop: write");
+
+                //unlock mutex for depth image
+            //    pthread_mutex_unlock( &mutex_depth );
+}

hidecam/push2vloop.h

+#ifndef PUSH2VLOOP
+#define PUSH2VLOOP
+
+#include "cv.h"
+// Open and configure the v4l2 loopback output device for RGB24 frames.
+void init_vloop();
+
+// Write one frame (img->imageData, packed 640x480x3) to the loopback device.
+void push2vloop(IplImage*);
+#endif

loopback/Makefile

 CC = g++
 LINK = g++
 INSTALL = install
-CFLAGS = `pkg-config --cflags opencv libfreenect` -I../include -I.
+CFLAGS = `pkg-config --cflags opencv libfreenect` -I../include -I. 
 LFLAGS = `pkg-config --libs opencv libfreenect`
 
-all         = kinect2webcam
+all         = kinect2webcam oni2webcam
 
 all: $(all)
 
 
 kinect2webcam.o: kinect2webcam.cpp
 	$(CC) $(CFLAGS) -o $@ -c $^
 	
+# Link the OpenNI-based webcam tool; OpenNI and NITE are added explicitly
+# because they are not covered by the pkg-config packages above.
+oni2webcam: oni2webcam.o 
+	$(LINK) -o $@ $^ $(LFLAGS)  -lOpenNI -lXnVNite_1_5_2 -lstdc++
+
+# OpenNI headers live outside the default include path.
+oni2webcam.o: oni2webcam.cpp
+	$(CC) $(CFLAGS) -o $@ -c  -I/usr/include/ni  $^