Commits

schlangen committed 4f12cf0 Draft

Added Hidecam2, now with a Qt GUI.

Files changed (12)

hidecam/oni2webcam.cpp

 
 
 IplImage* bg1=cvLoadImage("backgrounds/bg1.png");
+IplImage* mask1=cvLoadImage("masks/mask1.png");
+IplImage* mask2=cvLoadImage("masks/mask2.png");
 
 
 
             printf("d\n");    
         }*/
         
-        XnSkeletonJointPosition headJoint;
+        XnSkeletonJointPosition headJoint, neckJoint;
         userGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[0], XN_SKEL_HEAD,headJoint);
+        userGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[0], XN_SKEL_NECK,neckJoint);
 
-        XnPoint3D pt[1];
+        XnPoint3D pt[2];
         pt[0] = headJoint.position;
+        pt[1] = neckJoint.position;
 
-        depth.ConvertRealWorldToProjective(1, pt, pt);    
+        depth.ConvertRealWorldToProjective(2, pt, pt);    
         
         if (args[1][0] != 0){
-            cvCircle(img,                       // the dest image 
-                             cvPoint(pt[0].X, pt[0].Y), args[1][0],      // center point and radius 
-                             cvScalar(0, 0, 255, 0),    // the color; red 
-                             3, 8, 0);   
+            int dest_x = (int)((pt[0].X * (100-args[1][2])+pt[1].X*args[1][2])/100.0);
+            int dest_y = (int)((pt[0].Y * (100-args[1][2])+pt[1].Y*args[1][2])/100.0);
+            switch (args[1][1]){
+                case 0:
+                    cvCircle(img,                       // the dest image 
+                                     cvPoint(dest_x, dest_y), args[1][0],      // center point and radius 
+                                     cvScalar(0, 0, 255, 0),    // the color; red 
+                                     3, 8, 0);   
+                    break;
+                case 1:
+                case 2:
+                    int mask_size = args[1][0] * 2;
+                    mask_size += mask_size % 4; // round even sizes up to a multiple of 4
+                    IplImage *src = args[1][1] == 1 ? mask1 : mask2;
+                    IplImage *destination = cvCreateImage
+                        ( cvSize(mask_size,mask_size ),
+                                     src->depth, src->nChannels );
+                    cvResize(src, destination);
+                    
+                
+                    int x0 = dest_x - args[1][0];
+                    int y0 = dest_y - args[1][0];
+                    int r,g,b;
+                    for(int x = 0; x < mask_size; x++){
+                        for(int y = 0; y < mask_size; y++){
+                            if (x0+x < 0 || x0+x >= 640 || y0+y < 0 || y0+y >= 480)
+                                continue; // skip pixels that fall outside the 640x480 frame
+                            size_t rgb_index = 3*((y0+y)*640+(x0+x));
+                            size_t src_index = 3*(y*mask_size+x);
+                            r = destination->imageData[src_index];
+                            g = destination->imageData[src_index+1];
+                            b = destination->imageData[src_index+2];
+
+                            // magenta (255, 0, 255) marks transparent mask pixels; imageData
+                            // is plain char (usually signed), so 255 compares equal to -1 here
+                            if (! (r==-1&&g==0&&b==-1)){
+                                img->imageData[rgb_index] = r;
+                                img->imageData[rgb_index+1] = g;
+                                img->imageData[rgb_index+2] = b;
+                            }
+                        }
+                    }
+                    cvReleaseImage(&destination); // release the per-frame resized mask
+                    break;
               }
+            }
     }
     /*
     XN_SKEL_HEAD 	
 
 	
 	while(true){
+        context.WaitAnyUpdateAll();
 		cvSet(img, CV_RGB(0, 0, 0));
 		
 		
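For reference, the overlay anchor computed above is a plain linear blend between the projected head and neck joints, with args[1][2] acting as a 0-100 percentage toward the neck. A minimal standalone sketch of the same arithmetic (blend_anchor is a hypothetical helper, not part of this commit):

// Hypothetical helper mirroring the blend above: weight = 0 anchors the
// overlay on the head, weight = 100 on the neck, 50 halfway in between.
static CvPoint blend_anchor(XnPoint3D head, XnPoint3D neck, int weight)
{
    int x = (int)((head.X * (100 - weight) + neck.X * weight) / 100.0);
    int y = (int)((head.Y * (100 - weight) + neck.Y * weight) / 100.0);
    return cvPoint(x, y);
}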

hidecam/push2vloop.cpp

 
 	fdwr = open(video_device, O_RDWR);
 	printf("fdwr: %i\n",fdwr);
-	assert(fdwr >= 0);
+	//assert(fdwr >= 0);
 
 	int ret_code = ioctl(fdwr, VIDIOC_QUERYCAP, &vid_caps);
 	//assert(ret_code != -1);

hidecam2/kinectbinding.cpp

+#include "kinectbinding.h"
+
+KinectBinding::KinectBinding()
+{
+    img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
+
+    // remove or make nice later:
+
+    bg1 = cvLoadImage("backgrounds/bg1.png");
+    mask1 = cvLoadImage("masks/mask1.png");
+    mask2 = cvLoadImage("masks/mask2.png");
+
+    kitchenMode = false;
+    mirrorMode = true;
+    play = false;
+    record = false;
+    sendRot = false;
+    filter = false;
+    preview = false;
+    raw = false;
+    filterLowConfidence = false;
+    realworld = false;
+    debugFacts = false;
+    debugCSV = false;
+    sendOrient = false;
+
+    // end ugly init stuff
+
+    printf("Initializing...\n");
+    unsigned int arg = 1, require_argument = 0, port_argument = 0;
+    XnMapOutputMode mapMode;
+    XnStatus nRetVal = XN_STATUS_OK;
+    XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks, hGestureCallbacks;
+    xn::Recorder recorder;
+
+    context.Init();
+
+    depth.Create(context);
+    image.Create(context);
+    scene.Create(context);
+
+    init_vloop();
+
+    mapMode.nXRes = XN_VGA_X_RES;
+    mapMode.nYRes = XN_VGA_Y_RES;
+    mapMode.nFPS = 30;
+    depth.SetMapOutputMode(mapMode);
+    image.SetMapOutputMode(mapMode);
+    scene.SetMapOutputMode(mapMode);
+
+    depth.GetAlternativeViewPointCap().SetViewPoint(image);
+
+    nRetVal = context.FindExistingNode(XN_NODE_TYPE_USER, userGenerator);
+    if (nRetVal != XN_STATUS_OK)
+        nRetVal = userGenerator.Create(context);
+
+    checkRetVal(userGenerator.RegisterUserCallbacks(New_User, Lost_User, NULL, hUserCallbacks));
+    checkRetVal(userGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(Calibration_Started, Calibration_Ended, NULL, hCalibrationCallbacks));
+    checkRetVal(userGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(Pose_Detected, NULL, NULL, hPoseCallbacks));
+    //checkRetVal(userGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose));
+    checkRetVal(userGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL));
+    if (filter)
+        userGenerator.GetSkeletonCap().SetSmoothing(0.8);
+
+    xnSetMirror(depth, mirrorMode);
+    xnSetMirror(image, mirrorMode);
+    xnSetMirror(scene, mirrorMode);
+
+    /*  signal(SIGTERM, terminate);
+        signal(SIGINT, terminate);
+
+        struct sigaction sa;
+        memset(&sa, 0, sizeof(sa));
+        sa.sa_handler = &terminate;
+        sigaction(SIGINT, &sa, NULL);
+    */
+
+    printf("Initialized Kinect, looking for users...\n\n");
+    context.StartGeneratingAll();
+
+}
+
+KinectBinding::~KinectBinding(){
+    context.Shutdown();
+    cvReleaseImage(&img);
+}
+
+void KinectBinding::loop_step(){
+
+    // Read next available data
+    context.WaitAnyUpdateAll();
+    // Process the data
+    depth.GetMetaData(depthMD);
+    image.GetMetaData(imageMD);
+    scene.GetMetaData(sceneMD);
+
+    get_body_positions();
+
+    draw_image();
+
+   // cvCvtColor(img,img,CV_RGB2BGR);
+    push2vloop(img);
+}
+
+
+
+void XN_CALLBACK_TYPE KinectBinding::Gesture_Recognized(xn::GestureGenerator& generator, const XnChar* strGesture, const XnPoint3D* pIDPosition, const XnPoint3D* pEndPosition, void* pCookie) {
+    printf("Gesture recognized: %s\n", strGesture);
+    generator.RemoveGesture(strGesture);
+}
+
+void XN_CALLBACK_TYPE KinectBinding::Gesture_Process(xn::GestureGenerator& generator, const XnChar* strGesture, const XnPoint3D* pPosition, XnFloat fProgress, void* pCookie) {
+  std::cout <<  "gesture process" <<std::endl;
+}
+
+
+
+// Callback: New user was detected
+void XN_CALLBACK_TYPE KinectBinding::New_User(xn::UserGenerator& generator, XnUserID nId, void* pCookie) {
+        printf("New User %d\n", nId);
+        std::cout <<  std::endl;
+        generator.GetSkeletonCap().RequestCalibration(nId, TRUE);
+
+}
+
+
+
+// Callback: An existing user was lost
+void XN_CALLBACK_TYPE KinectBinding::Lost_User(xn::UserGenerator& generator, XnUserID nId, void* pCookie) {
+        printf("Lost user %d\n", nId);
+        std::cout <<  std::endl;
+
+}
+
+
+
+// Callback: Detected a pose
+void XN_CALLBACK_TYPE KinectBinding::Pose_Detected(xn::PoseDetectionCapability& capability, const XnChar* strPose, XnUserID nId, void* pCookie) {
+        printf("Pose %s detected for user %d\n", strPose, nId);
+        std::cout <<  std::endl;
+        capability.StopPoseDetection(nId);
+        // TODO? do we ever get here?
+        //userGenerator.GetSkeletonCap().RequestCalibration(nId, TRUE);
+}
+
+
+
+// Callback: Started calibration
+void XN_CALLBACK_TYPE KinectBinding::Calibration_Started(xn::SkeletonCapability& capability, XnUserID nId, void* pCookie) {
+        printf("Calibration started for user %d\n", nId);
+        std::cout <<  std::endl;
+}
+
+
+
+// Callback: Finished calibration
+void XN_CALLBACK_TYPE KinectBinding::Calibration_Ended(xn::SkeletonCapability& capability, XnUserID nId, XnBool bSuccess, void* pCookie) {
+        if (bSuccess) {
+                printf("Calibration complete, start tracking user %d\n", nId);
+                std::cout <<  std::endl;
+                capability.StartTracking(nId);
+
+        }
+        else {
+                printf("Calibration failed for user %d\n", nId);
+                capability.RequestCalibration(nId, TRUE);
+        }
+}
+
+
+
+
+
+
+
+int KinectBinding::jointPos(XnUserID player, XnSkeletonJoint eJoint) {
+    //  printf("jointpos for  user %d\n", player);
+
+        XnSkeletonJointTransformation jointTrans;
+
+        userGenerator.GetSkeletonCap().GetSkeletonJoint(player, eJoint, jointTrans);
+
+        posConfidence = jointTrans.position.fConfidence;
+
+        //userID = player;
+
+        if (filterLowConfidence && posConfidence < 0.5) {
+                return 0;
+        }
+
+        if (!raw)
+        {
+          jointCoords[0] =  ( (1280 - jointTrans.position.position.X) / 2560); //Normalize coords to 0..1 interval
+          jointCoords[1] =  ( (960 - jointTrans.position.position.Y) / 1920); //Normalize coords to 0..1 interval
+          jointCoords[2] =  (jointTrans.position.position.Z * 7.8125 / 10000); //Normalize coords to 0..7.8125 interval
+        }
+        else if (realworld)
+        {
+                XnPoint3D realwordPoint;
+                realwordPoint.X = 0; realwordPoint.Y = 0; realwordPoint.Z = 0;
+                depth.ConvertProjectiveToRealWorld(1, &jointTrans.position.position, &realwordPoint);
+
+            jointCoords[0] = realwordPoint.X;
+            jointCoords[1] = realwordPoint.Y;
+            jointCoords[2] = realwordPoint.Z;
+        }
+        else
+        {
+
+          jointCoords[0] = jointTrans.position.position.X;
+          jointCoords[1] = jointTrans.position.position.Y;
+          jointCoords[2] = jointTrans.position.position.Z;
+        }
+
+        if (sendOrient)
+        {
+          orientConfidence = jointTrans.orientation.fConfidence;
+
+          for (int i=0; i<9; i++)
+          {
+            jointOrients[i] = jointTrans.orientation.orientation.elements[i];
+          }
+        }
+
+        allJointCoords[eJoint-1][0] = jointCoords[0];
+        allJointCoords[eJoint-1][1] = jointCoords[1];
+        allJointCoords[eJoint-1][2] = jointCoords[2];
+
+        //visualize_current_data();
+
+        return 0;
+}
+
+
+void KinectBinding::get_body_positions(){
+
+        XnUserID aUsers[15];
+        XnUInt16 nUsers = 15;
+        userGenerator.GetUsers(aUsers, nUsers);
+        for (int i = 0; i < nUsers; ++i) {
+                if (userGenerator.GetSkeletonCap().IsTracking(aUsers[i])) {
+                for(int j=1;j<25;j++){
+                  jointPos(aUsers[i], (XnSkeletonJoint) j);
+                }
+
+
+                }
+                else {
+                        //Send user's center of mass
+                        //sendUserPosMsg(aUsers[i]);
+                }
+        }
+}
+
+
+void KinectBinding::checkRetVal(XnStatus nRetVal) {
+        if (nRetVal != XN_STATUS_OK) {
+                printf("There was a problem initializing the Kinect... Make sure you have \
+connected both the USB and power cables and that the driver and OpenNI framework \
+are correctly installed.\n\n");
+                exit(1);
+        }
+}
+
+
+//http://stackoverflow.com/questions/8364558/openni-rgb-image-to-opencv-bgr-iplimage-conversion
+void KinectBinding::draw_image(){
+
+    //pRGbPixel = image.GetRGB24ImageMap();
+
+   XnUInt8 *pImage = imageMD.WritableData();
+
+   const XnLabel *pLabels = sceneMD.Data();
+   const XnDepthPixel *pDepths = depthMD.Data();
+
+  /*  XnUInt8 * pImage = new XnUInt8 [640*480*3];
+    memcpy(pImage,imageMD.Data(),640*480*3*sizeof(XnUInt8));
+    XnUInt8 temp;
+    for(size_t row=0; row<480; row++){
+        for(size_t col=0;col<3*640; col+=3){
+            size_t index = row*3*640+col;
+            temp = pImage[index];
+            pImage[index] = pImage[index+2];
+            pImage[index+2] = temp;
+        }
+    }*/
+    img->imageData = (char*) pImage;
+
+    cvCvtColor( img, img, CV_BGR2RGB);
+
+
+/*
+    int scene_index = 0;
+    for(size_t row=0; row<480; row++){
+        for(size_t col=0;col<3*640; col+=3){
+            size_t rgb_index = row*3*640+col;
+
+            if (pLabels[scene_index++] == 0){
+                switch (args[0][0]){
+                    case 0:
+                        img->imageData[rgb_index] = 0;//pDepths[scene_index] % 255;
+                        img->imageData[rgb_index+1] = 0;//pDepths[scene_index] % 255;
+                        img->imageData[rgb_index+2] = 0;//pDepths[scene_index] % 255;
+                        break;
+                    case 1:
+                        img->imageData[rgb_index] = bg1->imageData[rgb_index];//pDepths[scene_index] % 255;
+                        img->imageData[rgb_index+1] = bg1->imageData[rgb_index+1];//pDepths[scene_index] % 255;
+                        img->imageData[rgb_index+2] = bg1->imageData[rgb_index+2];//pDepths[scene_index] % 255;
+                        break;
+                }
+
+                if (args[0][1] > 1){
+                        int r=0, g=0,b=0,count=0;
+                            int bs = args[0][1];
+                            for(int x = 0; x < bs; x++){
+                                for (int y = 0;y<bs;y++){
+                                    size_t extra_rgb_index = (row - row%bs + y)*3*640+(col-(col%bs + x)*3);
+                                    r += img->imageData[extra_rgb_index];
+                                    g += img->imageData[extra_rgb_index+1];
+                                    b += img->imageData[extra_rgb_index+2];
+                                    count++;
+                                }
+                            }
+                        if (count > 0){
+                            img->imageData[rgb_index] = (int) (r / count);
+                            img->imageData[rgb_index+1] = (int) (g / count);
+                            img->imageData[rgb_index+2] = (int) (b / count);
+                        }
+                    }
+                if (args[0][2] > 1){
+                            int r=0, g=0,b=0,count=0;
+                            int bs = args[0][2];
+                            for(int x = 0; x < bs; x++){
+                                for (int y = 0;y<bs;y++){
+                                    size_t extra_rgb_index = (row - int(bs/2) + y)*3*640+(col-(int(bs/2) + x)*3);
+                                    if (extra_rgb_index >= 0 && extra_rgb_index < 480*640*3){
+                                        r += img->imageData[extra_rgb_index];
+                                        g += img->imageData[extra_rgb_index+1];
+                                        b += img->imageData[extra_rgb_index+2];
+                                        count++;
+                                    }
+                                }
+                            }
+                        if (count > 0){
+                            img->imageData[rgb_index] = (int) (r / count);
+                            img->imageData[rgb_index+1] = (int) (g / count);
+                            img->imageData[rgb_index+2] = (int) (b / count);
+                        }
+                    }
+          }
+      }
+    }
+*/
+    // TODO: continue here!
+
+    XnUInt16 nUsers = userGenerator.GetNumberOfUsers();
+    XnUserID aUsers[nUsers];
+        userGenerator.GetUsers(aUsers, nUsers);
+
+    if ( nUsers > 0){
+        //printf("b0\n");
+        int b1 = userGenerator.GetSkeletonCap().IsCalibrated(aUsers[0]);
+        //printf("b1 %d\n", b1);
+        int b2 = userGenerator.GetSkeletonCap().IsTracking(aUsers[0]) ;
+        //printf("b2 %d\n", b2);
+        int b3 = userGenerator.GetSkeletonCap().IsJointAvailable(XN_SKEL_HEAD);
+        //printf("b3 %d\n", b3);
+        //printf("got user");
+
+
+
+        /*    XnSkeletonJointTransformation jointTrans;
+
+            userGenerator.GetSkeletonCap().GetSkeletonJointPosition( jointTrans);
+
+            posConfidence = jointTrans.position.fConfidence;
+            printf("confidence: %f -> ", posConfidence);
+            int isconf = posConfidence==1.0f;
+            printf("%d \n", isconf);
+            if (isconf && b1 && b2 && b3){
+                printf("u\n");
+                XnPoint3D headCenter[2];
+                headCenter[0] = jointTrans.position.position;
+                printf("a %f %f %f \n", headCenter[0].X, headCenter[0].Y, headCenter[0].Z);
+
+                printf("x\n");
+                depth.ConvertRealWorldToProjective(1, headCenter, headCenter);
+                printf("b\n");
+
+                 cvCircle(img,                       // the dest image
+                         cvPoint(headProjective[0].X, headProjective[0].Y), 30,      // center point and radius
+                         cvScalar(0, 0, 255, 0),    // the color; red
+                         3, 8, 0);
+            printf("d\n");
+        }*/
+
+        XnSkeletonJointPosition headJoint, neckJoint;
+        userGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[0], XN_SKEL_HEAD,headJoint);
+        userGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[0], XN_SKEL_NECK,neckJoint);
+
+        XnPoint3D pt[2];
+        pt[0] = headJoint.position;
+        pt[1] = neckJoint.position;
+
+        depth.ConvertRealWorldToProjective(2, pt, pt);
+/*
+        if (args[1][0] != 0){
+            int dest_x = (int)((pt[0].X * (100-args[1][2])+pt[1].X*args[1][2])/100.0);
+            int dest_y = (int)((pt[0].Y * (100-args[1][2])+pt[1].Y*args[1][2])/100.0);
+            switch (args[1][1]){
+                case 0:
+                    cvCircle(img,                       // the dest image
+                                     cvPoint(dest_x, dest_y), args[1][0],      // center point and radius
+                                     cvScalar(0, 0, 255, 0),    // the color; red
+                                     3, 8, 0);
+                    break;
+                case 1:
+                case 2:
+                    int mask_size = args[1][0] * 2;
+                    mask_size += mask_size % 4; // make even
+                    IplImage *src = args[1][1] == 1 ? mask1 : mask2;
+                    IplImage *destination = cvCreateImage
+                        ( cvSize(mask_size,mask_size ),
+                                     src->depth, src->nChannels );
+                    cvResize(src, destination);
+
+
+                    int x0 = dest_x - args[1][0];
+                    int y0 = dest_y - args[1][0];
+                    int r,g,b;
+                    for(int x =0;x< mask_size;x++){
+                        for(int y=0;y<mask_size;y++){
+                            size_t rgb_index = 3*((y0+y)*640+(x0+x));
+                            size_t src_index = 3*(y*mask_size+x);
+                            r = destination->imageData[src_index];
+                            g = destination->imageData[src_index+1];
+                            b = destination->imageData[src_index+2];
+
+                            if (! (r==-1&&g==0&&b==-1)){
+                                img->imageData[rgb_index] = r;
+                                img->imageData[rgb_index+1] = g;
+                                img->imageData[rgb_index+2] = b;
+                            }
+                        }
+                    }
+                   break;
+              }
+            }*/
+    }
+    /*
+    XN_SKEL_HEAD
+XN_SKEL_NECK
+XN_SKEL_TORSO
+XN_SKEL_LEFT_SHOULDER*/
+}
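For orientation, the default (non-raw) branch of jointPos() above squeezes OpenNI's real-world millimetre coordinates into roughly unit ranges: X = 0 mm (the centre of the field of view) maps to (1280 - 0)/2560 = 0.5, X = -1280 mm maps to 1.0, and a depth of Z = 2000 mm maps to 2000 * 7.8125 / 10000 ≈ 1.56 on the 0..7.8125 scale used for Z.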

hidecam2/kinectbinding.h

+#ifndef KINECTBINDING_H
+#define KINECTBINDING_H
+
+
+#include <cstdio>
+#include <csignal>
+#include <iostream>
+#include <fstream>
+#include <pthread.h>
+
+#include "push2vloop.h"
+
+#define CV_NO_BACKWARD_COMPATIBILITY
+
+#include <XnCppWrapper.h>
+
+#include "cv.h"
+#include "highgui.h"
+
+class KinectBinding
+{
+    public:
+        KinectBinding();
+        ~KinectBinding();
+
+        IplImage *img;
+
+        void loop_step();
+
+
+    private:
+        xn::Context context;
+        xn::DepthGenerator depth;
+        xn::DepthMetaData depthMD;
+        xn::ImageGenerator image;
+        xn::ImageMetaData imageMD;
+        xn::UserGenerator userGenerator;
+        xn::GestureGenerator gestureGenerator;
+        xn::SceneAnalyzer scene;
+        xn::SceneMetaData sceneMD;
+
+
+        static void XN_CALLBACK_TYPE Gesture_Recognized(xn::GestureGenerator& generator, const XnChar* strGesture, const XnPoint3D* pIDPosition, const XnPoint3D* pEndPosition, void* pCookie);
+        static void XN_CALLBACK_TYPE Gesture_Process(xn::GestureGenerator& generator, const XnChar* strGesture, const XnPoint3D* pPosition, XnFloat fProgress, void* pCookie);
+        static void XN_CALLBACK_TYPE New_User(xn::UserGenerator& generator, XnUserID nId, void* pCookie);
+        static void XN_CALLBACK_TYPE Lost_User(xn::UserGenerator& generator, XnUserID nId, void* pCookie);
+        static void XN_CALLBACK_TYPE Pose_Detected(xn::PoseDetectionCapability& capability, const XnChar* strPose, XnUserID nId, void* pCookie);
+        static void XN_CALLBACK_TYPE Calibration_Started(xn::SkeletonCapability& capability, XnUserID nId, void* pCookie);
+        static void XN_CALLBACK_TYPE Calibration_Ended(xn::SkeletonCapability& capability, XnUserID nId, XnBool bSuccess, void* pCookie);
+
+
+
+
+        void draw_image();
+
+        int jointPos(XnUserID player, XnSkeletonJoint eJoint);
+        void get_body_positions();
+
+
+        void checkRetVal(XnStatus nRetVal);
+
+
+        // check/remove later
+
+        IplImage *bg1, *mask1, *mask2;
+
+        float jointCoords[3];
+        float jointOrients[9];
+
+        // caches all joint coords
+        float allJointCoords[24][3];
+
+        float posConfidence;
+        float orientConfidence;
+
+        bool kitchenMode, mirrorMode, play, record, sendRot, filter, preview, raw, filterLowConfidence, realworld, debugFacts, debugCSV, sendOrient;
+
+
+        const static int nDimensions = 3;
+};
+
+#endif // KINECTBINDING_H

hidecam2/main.cpp

+#include <QtGui/QApplication>
+#include <QObject>
+#include <QtDebug>
+#include "cv.h"
+#include "mainwindow.h"
+
+int main(int argc, char *argv[])
+{
+    QApplication a(argc, argv);
+    MainWindow w;
+
+
+
+    WorkerThread worker_thread;
+    QObject::connect(&worker_thread, SIGNAL(ImageChanged(IplImage*)), &w, SLOT(updateImage(IplImage*)), Qt::QueuedConnection);
+    worker_thread.start();
+
+    w.show();
+    a.exec();
+    worker_thread.stop();
+    // timeout after 3 seconds
+    worker_thread.wait(3000);
+    return 0;
+}
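One caveat about the queued connection above: Qt can only marshal signal arguments across threads when their types are registered with the meta-type system, and IplImage* is not registered by default, so Qt will typically refuse the queued ImageChanged(IplImage*) connection at runtime with a "Cannot queue arguments of type 'IplImage*'" warning. A minimal sketch of the registration (the helper name and placement are assumptions, not part of the commit):

// Register IplImage* so it can travel through a Qt::QueuedConnection.
// Q_DECLARE_METATYPE belongs at file scope (e.g. once in workerthread.h);
// the qRegisterMetaType call must run before the QObject::connect above.
#include <QMetaType>
#include "cv.h"

Q_DECLARE_METATYPE(IplImage*)

static void registerKinectTypes()
{
    qRegisterMetaType<IplImage*>("IplImage*");
}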

hidecam2/mainwindow.cpp

+#include "mainwindow.h"
+#include "ui_mainwindow.h"
+
+
+MainWindow::MainWindow(QWidget *parent) :
+    QMainWindow(parent),
+    ui(new Ui::MainWindow)
+{
+    ui->setupUi(this);
+
+
+    dframe = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 4);
+    // the individual channels for the IplImage
+    tchannel0 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
+    tchannel1 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
+    tchannel2 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
+    tchannel3 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
+}
+
+MainWindow::~MainWindow()
+{
+    delete ui;
+}
+
+void MainWindow::updateImage(IplImage *img)
+{
+    // from http://www.qtcentre.org/threads/11655-OpenCV-integration
+
+
+
+    // set all elements in tchannel0 (alpha channel) to 255
+    cvSet(tchannel0,cvScalarAll(255),0);
+
+    // img is the captured 3-channel frame
+    // and dframe the 4-channel frame to be displayed
+    cvSplit(img, tchannel1, tchannel2, tchannel3, NULL);
+    cvMerge(tchannel1, tchannel2, tchannel3, tchannel0, dframe);
+
+    // point to the image data stored in the IplImage*
+       const unsigned char * data = (unsigned char *)(dframe->imageData);
+
+
+    // read other parameters in local variables
+    int width = 640;
+    int height = 480;
+    int bytesPerLine = width*4;
+
+    // imageframe is my QLabel object
+    QImage qimage( data, width, height, bytesPerLine, QImage::Format_RGB32 );
+    ui->imageframe->setPixmap(QPixmap::fromImage(qimage));
+}

hidecam2/mainwindow.h

+#ifndef MAINWINDOW_H
+#define MAINWINDOW_H
+
+#include <QMainWindow>
+#include <QtDebug>
+#include <workerthread.h>
+
+#include "cv.h"
+
+namespace Ui {
+    class MainWindow;
+}
+
+class MainWindow : public QMainWindow
+{
+    Q_OBJECT
+
+public:
+    explicit MainWindow(QWidget *parent = 0);
+    ~MainWindow();
+
+public slots:
+    void updateImage(IplImage *img);
+
+private:
+    Ui::MainWindow *ui;
+
+     IplImage *dframe, *tchannel0, *tchannel1,*tchannel2,*tchannel3;
+};
+
+#endif // MAINWINDOW_H

hidecam2/mainwindow.ui

+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>MainWindow</class>
+ <widget class="QMainWindow" name="MainWindow">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>966</width>
+    <height>576</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>MainWindow</string>
+  </property>
+  <widget class="QWidget" name="centralWidget">
+   <widget class="QGroupBox" name="groupBox">
+    <property name="enabled">
+     <bool>true</bool>
+    </property>
+    <property name="geometry">
+     <rect>
+      <x>670</x>
+      <y>30</y>
+      <width>291</width>
+      <height>251</height>
+     </rect>
+    </property>
+    <property name="title">
+     <string>head overlay</string>
+    </property>
+    <property name="flat">
+     <bool>false</bool>
+    </property>
+    <property name="checkable">
+     <bool>true</bool>
+    </property>
+    <property name="checked">
+     <bool>true</bool>
+    </property>
+    <widget class="QWidget" name="formLayoutWidget">
+     <property name="enabled">
+      <bool>true</bool>
+     </property>
+     <property name="geometry">
+      <rect>
+       <x>10</x>
+       <y>30</y>
+       <width>271</width>
+       <height>211</height>
+      </rect>
+     </property>
+     <layout class="QFormLayout" name="formLayout">
+      <property name="fieldGrowthPolicy">
+       <enum>QFormLayout::ExpandingFieldsGrow</enum>
+      </property>
+      <property name="labelAlignment">
+       <set>Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter</set>
+      </property>
+      <property name="formAlignment">
+       <set>Qt::AlignHCenter|Qt::AlignTop</set>
+      </property>
+      <item row="0" column="0">
+       <widget class="QLabel" name="label">
+        <property name="text">
+         <string>overlay</string>
+        </property>
+       </widget>
+      </item>
+      <item row="0" column="1">
+       <widget class="QComboBox" name="comboBox">
+        <property name="enabled">
+         <bool>true</bool>
+        </property>
+        <item>
+         <property name="text">
+          <string>Tux</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>Guy Fawkes</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>Circle</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>censor bar</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>blur</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>pixel</string>
+         </property>
+        </item>
+       </widget>
+      </item>
+      <item row="1" column="0">
+       <widget class="QLabel" name="label_2">
+        <property name="text">
+         <string>horizontal
+offset:</string>
+        </property>
+       </widget>
+      </item>
+      <item row="1" column="1">
+       <widget class="QLabel" name="label_head_offx_value">
+        <property name="enabled">
+         <bool>true</bool>
+        </property>
+        <property name="sizePolicy">
+         <sizepolicy hsizetype="MinimumExpanding" vsizetype="Preferred">
+          <horstretch>0</horstretch>
+          <verstretch>0</verstretch>
+         </sizepolicy>
+        </property>
+        <property name="autoFillBackground">
+         <bool>false</bool>
+        </property>
+        <property name="text">
+         <string>0</string>
+        </property>
+        <property name="alignment">
+         <set>Qt::AlignCenter</set>
+        </property>
+       </widget>
+      </item>
+      <item row="2" column="1">
+       <widget class="QSlider" name="slider_head_offx">
+        <property name="enabled">
+         <bool>true</bool>
+        </property>
+        <property name="minimum">
+         <number>-50</number>
+        </property>
+        <property name="maximum">
+         <number>50</number>
+        </property>
+        <property name="singleStep">
+         <number>1</number>
+        </property>
+        <property name="orientation">
+         <enum>Qt::Horizontal</enum>
+        </property>
+       </widget>
+      </item>
+      <item row="3" column="0">
+       <widget class="QLabel" name="label_3">
+        <property name="text">
+         <string>vertical
+offset:</string>
+        </property>
+       </widget>
+      </item>
+      <item row="3" column="1">
+       <widget class="QLabel" name="label_head_offy_value">
+        <property name="enabled">
+         <bool>true</bool>
+        </property>
+        <property name="sizePolicy">
+         <sizepolicy hsizetype="MinimumExpanding" vsizetype="Preferred">
+          <horstretch>0</horstretch>
+          <verstretch>0</verstretch>
+         </sizepolicy>
+        </property>
+        <property name="autoFillBackground">
+         <bool>false</bool>
+        </property>
+        <property name="text">
+         <string>0</string>
+        </property>
+        <property name="alignment">
+         <set>Qt::AlignCenter</set>
+        </property>
+       </widget>
+      </item>
+      <item row="4" column="1">
+       <widget class="QSlider" name="slider_head_offy">
+        <property name="enabled">
+         <bool>true</bool>
+        </property>
+        <property name="minimum">
+         <number>-50</number>
+        </property>
+        <property name="maximum">
+         <number>50</number>
+        </property>
+        <property name="singleStep">
+         <number>1</number>
+        </property>
+        <property name="orientation">
+         <enum>Qt::Horizontal</enum>
+        </property>
+       </widget>
+      </item>
+     </layout>
+    </widget>
+   </widget>
+   <widget class="QLabel" name="imageframe">
+    <property name="geometry">
+     <rect>
+      <x>20</x>
+      <y>30</y>
+      <width>640</width>
+      <height>480</height>
+     </rect>
+    </property>
+    <property name="text">
+     <string>[image]</string>
+    </property>
+   </widget>
+   <zorder>imageframe</zorder>
+   <zorder>groupBox</zorder>
+  </widget>
+  <widget class="QMenuBar" name="menuBar">
+   <property name="geometry">
+    <rect>
+     <x>0</x>
+     <y>0</y>
+     <width>966</width>
+     <height>20</height>
+    </rect>
+   </property>
+  </widget>
+  <widget class="QToolBar" name="mainToolBar">
+   <attribute name="toolBarArea">
+    <enum>TopToolBarArea</enum>
+   </attribute>
+   <attribute name="toolBarBreak">
+    <bool>false</bool>
+   </attribute>
+  </widget>
+  <widget class="QStatusBar" name="statusBar"/>
+ </widget>
+ <layoutdefault spacing="6" margin="11"/>
+ <resources/>
+ <connections>
+  <connection>
+   <sender>slider_head_offx</sender>
+   <signal>valueChanged(int)</signal>
+   <receiver>label_head_offx_value</receiver>
+   <slot>setNum(int)</slot>
+   <hints>
+    <hint type="sourcelabel">
+     <x>649</x>
+     <y>133</y>
+    </hint>
+    <hint type="destinationlabel">
+     <x>649</x>
+     <y>110</y>
+    </hint>
+   </hints>
+  </connection>
+  <connection>
+   <sender>slider_head_offy</sender>
+   <signal>valueChanged(int)</signal>
+   <receiver>label_head_offy_value</receiver>
+   <slot>setNum(int)</slot>
+   <hints>
+    <hint type="sourcelabel">
+     <x>649</x>
+     <y>178</y>
+    </hint>
+    <hint type="destinationlabel">
+     <x>649</x>
+     <y>155</y>
+    </hint>
+   </hints>
+  </connection>
+ </connections>
+</ui>
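As committed, only the two slider-to-label connections above exist; the combo box and offset sliders are not yet fed into the Kinect pipeline. If they are wired up later, one plausible route is Qt's auto-connection naming convention, sketched here with a purely hypothetical slot and member on MainWindow:

// Hypothetical: setupUi()/connectSlotsByName() would bind this slot to
// the slider_head_offx widget automatically because of its name.
void MainWindow::on_slider_head_offx_valueChanged(int value)
{
    head_offset_x = value; // hypothetical member, later consumed by the worker thread
}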

hidecam2/push2vloop.cpp

+
+
+#include <sys/ioctl.h>
+#include <linux/videodev2.h>
+#include <fcntl.h>
+#include <unistd.h>  // write()
+#include <cstdio>    // printf()
+#include <cstdlib>   // malloc()
+#include <cstring>   // memset()
+
+#include "push2vloop.h"
+
+
+# define VIDEO_DEVICE "/dev/video0"
+# define FRAME_FORMAT V4L2_PIX_FMT_RGB24
+# define FRAME_WIDTH  640
+# define FRAME_HEIGHT 480
+
+struct v4l2_capability vid_caps;
+struct v4l2_format vid_format;
+const char*video_device=VIDEO_DEVICE;
+int fdwr;
+size_t framesize = FRAME_WIDTH * FRAME_HEIGHT * 3;
+size_t linewidth = FRAME_WIDTH * 3;
+
+
+
+void init_vloop(){
+    printf("init video...\n");
+	__u8*buffer;
+	__u8*check_buffer;
+
+
+	buffer=(__u8*)malloc(sizeof(__u8)*framesize);
+	check_buffer=(__u8*)malloc(sizeof(__u8)*framesize);
+
+	int i;
+	memset(buffer, 0, framesize);
+	memset(check_buffer, 0, framesize);
+
+	fdwr = open(video_device, O_RDWR);
+	printf("fdwr: %i\n",fdwr);
+	//assert(fdwr >= 0);
+
+	int ret_code = ioctl(fdwr, VIDIOC_QUERYCAP, &vid_caps);
+	//assert(ret_code != -1);
+
+	memset(&vid_format, 0, sizeof(vid_format));
+
+	vid_format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	vid_format.fmt.pix.width = FRAME_WIDTH;
+	vid_format.fmt.pix.height = FRAME_HEIGHT;
+	vid_format.fmt.pix.pixelformat = FRAME_FORMAT;
+	vid_format.fmt.pix.sizeimage = framesize;
+	vid_format.fmt.pix.field = V4L2_FIELD_NONE;
+	vid_format.fmt.pix.bytesperline = linewidth;
+	vid_format.fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+	ret_code = ioctl(fdwr, VIDIOC_S_FMT, &vid_format);
+    printf("video init done!\n");
+}
+
+
+void push2vloop(IplImage *img){
+        //lock mutex for depth image
+             //   pthread_mutex_lock( &mutex_depth );
+
+                write(fdwr, img->imageData, framesize);
+
+                //unlock mutex for depth image
+            //    pthread_mutex_unlock( &mutex_depth );
+}
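push2vloop() assumes a v4l2loopback device at /dev/video0 and writes raw RGB24 frames straight into it. A quick, hypothetical way to check that frames actually arrive is to open the same device as a capture source with the OpenCV C API (assuming the loopback module is loaded and capture index 0 maps to /dev/video0):

// Hypothetical viewer for the loopback feed written by push2vloop().
#include "cv.h"
#include "highgui.h"

int main(void)
{
    CvCapture *cap = cvCaptureFromCAM(0);    // index 0 -> /dev/video0 on most setups
    if (!cap) return 1;
    for (;;) {
        IplImage *frame = cvQueryFrame(cap); // buffer is owned by the capture
        if (!frame) break;
        cvShowImage("loopback", frame);
        if (cvWaitKey(30) == 27) break;      // Esc quits
    }
    cvReleaseCapture(&cap);
    return 0;
}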

hidecam2/push2vloop.h

+#ifndef PUSH2VLOOP
+#define PUSH2VLOOP
+
+#include "cv.h"
+void init_vloop();
+
+void push2vloop(IplImage*);
+#endif

hidecam2/workerthread.cpp

+#include "workerthread.h"
+
+WorkerThread::WorkerThread(QObject *parent) :
+    QThread(parent)
+{
+    kbinding = new KinectBinding();
+}
+
+void WorkerThread::run()
+{
+    this->running = true;
+    while (this->running){
+     //   qDebug() << "worker thread runs";
+        kbinding->loop_step();
+        emit ImageChanged(kbinding->img);
+        msleep(500); // QThread::sleep() takes whole seconds; sleep(0.5) would truncate to 0
+
+    }
+    // do cleanup stuff
+}
+
+void WorkerThread::stop()
+{
+    this->running = false;
+}
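Note that run() above emits the same IplImage* on every iteration while loop_step() keeps overwriting its pixel buffer, so the GUI thread can end up painting a half-updated frame. One way to avoid that tearing, sketched here as a suggestion rather than something this commit does, is to hand the GUI its own copy:

// Inside run(), instead of emit ImageChanged(kbinding->img):
IplImage *copy = cvCloneImage(kbinding->img); // deep copy for the GUI thread
emit ImageChanged(copy);                      // the receiving slot must cvReleaseImage(&copy)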

hidecam2/workerthread.h

+#ifndef WORKERTHREAD_H
+#define WORKERTHREAD_H
+
+#include <QThread>
+#include <QtDebug>
+#include "cv.h"
+#include "kinectbinding.h"
+
+class WorkerThread : public QThread
+{
+    Q_OBJECT
+public:
+    explicit WorkerThread(QObject *parent = 0);
+    void stop();
+
+private:
+    void run();
+    bool running;
+    KinectBinding *kbinding;
+
+signals:
+    void ImageChanged(IplImage *img);
+
+public slots:
+
+
+};
+
+#endif // WORKERTHREAD_H