Preface

Previous posts covered how to get the color image, the depth image, and the human skeleton joints from the Kinect:

  • Kinect Learning (3): Getting RGB Color Data
  • Kinect Learning (4): Extracting Depth Data
  • Kinect Learning (5): Extracting Depth Data with Player IDs
  • Kinect Learning (6): Extracting Skeleton Joint Data

This time, the goal is to put these pieces together and grab all of this data from the Kinect at the same time.

Code

The code below simply integrates the content of the previous posts, so it is not explained in much detail here.

#include <Windows.h>
#include <iostream>
#include <NuiApi.h>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

// One displayed pixel of the depth map: a BGR color plus the player index it belongs to.
typedef struct structBGR
{
    BYTE blue;
    BYTE green;
    BYTE red;
    BYTE player;
} BGR;

bool tracked[NUI_SKELETON_COUNT] = { FALSE };
cv::Point skeletonPoint[NUI_SKELETON_COUNT][NUI_SKELETON_POSITION_COUNT] = { cv::Point(0, 0) };
cv::Point colorPoint[NUI_SKELETON_COUNT][NUI_SKELETON_POSITION_COUNT] = { cv::Point(0, 0) };

void getColorImage(HANDLE & colorStreamHandle, cv::Mat & colorImg);
BGR Depth2RGB(USHORT depthID);
void getDepthImage(HANDLE & depthStreamHandle, cv::Mat & depthImg, cv::Mat & mask);
void drawSkeleton(cv::Mat &img, cv::Point pointSet[], int which_one);
void getSkeletonImage(cv::Mat & skeletonImg, cv::Mat & colorImg);

int main(int argc, char* argv[])
{
    cv::Mat colorImg;
    colorImg.create(480, 640, CV_8UC3);
    cv::Mat depthImg;
    depthImg.create(240, 320, CV_8UC3);
    cv::Mat skeletonImg;
    skeletonImg.create(240, 320, CV_8UC3);
    cv::Mat mask;
    mask.create(240, 320, CV_8UC3);

    HANDLE colorEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    HANDLE depthEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    HANDLE skeletonEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    HANDLE colorStreamHandle = NULL;
    HANDLE depthStreamHandle = NULL;

    HRESULT hr;
    hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX | NUI_INITIALIZE_FLAG_USES_SKELETON);
    if (FAILED(hr))
    {
        cout << "Nui initialize failed." << endl;
        return hr;
    }

    hr = NuiImageStreamOpen(NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, 0, 2, colorEvent, &colorStreamHandle);
    if (FAILED(hr))
    {
        cout << "Can not open color stream." << endl;
        return hr;
    }

    hr = NuiImageStreamOpen(NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX, NUI_IMAGE_RESOLUTION_320x240, 0, 2, depthEvent, &depthStreamHandle);
    if (FAILED(hr))
    {
        cout << "Can not open depth stream." << endl;
        return hr;
    }

    hr = NuiSkeletonTrackingEnable(skeletonEvent, 0);
    if (FAILED(hr))
    {
        cout << "Can not enable skeleton tracking." << endl;
        return hr;
    }

    cv::namedWindow("mask", CV_WINDOW_AUTOSIZE);
    cv::namedWindow("colorImg", CV_WINDOW_AUTOSIZE);
    cv::namedWindow("depthImg", CV_WINDOW_AUTOSIZE);
    cv::namedWindow("skeletonImg", CV_WINDOW_AUTOSIZE);

    while (1)
    {
        // Each event is signalled when the corresponding stream has a new frame ready.
        if (WaitForSingleObject(colorEvent, 0) == 0)
        {
            getColorImage(colorStreamHandle, colorImg);
        }
        if (WaitForSingleObject(depthEvent, 0) == 0)
        {
            getDepthImage(depthStreamHandle, depthImg, mask);
        }
        if (WaitForSingleObject(skeletonEvent, 0) == 0)
        {
            getSkeletonImage(skeletonImg, colorImg);
        }

        cv::imshow("mask", mask);
        cv::imshow("colorImg", colorImg);
        cv::imshow("depthImg", depthImg);
        cv::imshow("skeletonImg", skeletonImg);

        if (cv::waitKey(20) == 27)
            break;
    }

    NuiShutdown();
    cv::destroyAllWindows();
    return 0;
}

void getColorImage(HANDLE & colorStreamHandle, cv::Mat & colorImg)
{
    const NUI_IMAGE_FRAME * pImageFrame = NULL;

    HRESULT hr = NuiImageStreamGetNextFrame(colorStreamHandle, 0, &pImageFrame);
    if (FAILED(hr))
    {
        cout << "Could not get color image" << endl;
        NuiShutdown();
        return;
    }

    INuiFrameTexture * pTexture = pImageFrame->pFrameTexture;
    NUI_LOCKED_RECT LockedRect;
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    if (LockedRect.Pitch != 0)
    {
        for (int i = 0; i < colorImg.rows; i++)
        {
            uchar *ptr = colorImg.ptr<uchar>(i);    // pointer to row i
            // Each byte is a single color value, so a plain uchar pointer is enough.
            uchar *pBuffer = (uchar*)(LockedRect.pBits) + i * LockedRect.Pitch;
            for (int j = 0; j < colorImg.cols; j++)
            {
                // The source data has 4 bytes per pixel: bytes 0-1-2 are BGR, the 4th is currently unused.
                ptr[3 * j] = pBuffer[4 * j];
                ptr[3 * j + 1] = pBuffer[4 * j + 1];
                ptr[3 * j + 2] = pBuffer[4 * j + 2];
            }
        }
    }
    else
    {
        cout << "Failed to capture the color image" << endl;
    }

    pTexture->UnlockRect(0);
    NuiImageStreamReleaseFrame(colorStreamHandle, pImageFrame);
}

// Process every pixel of the depth data: pixels belonging to the same player ID are marked with
// the same color, different players get different colors, and pixels that belong to no player
// keep a grayscale value derived from the original depth.
BGR Depth2RGB(USHORT depthID)
{
    // Each pixel carries 16 bits of information: the lowest 3 bits are the player ID (which
    // detected person the pixel belongs to), and the remaining 13 bits are the depth value.
    USHORT realDepth = (depthID & 0xfff8) >> 3; // depth value, upper 13 bits
    USHORT player = depthID & 0x0007;           // player ID, lower 3 bits

    // The extracted value is a distance; normalize it to 0-255 so it can be displayed.
    BYTE depth = (BYTE)(255 * realDepth / 0x1fff);

    BGR color_data;
    color_data.blue = color_data.green = color_data.red = 0;
    color_data.player = player;

    // Equal values in all three channels produce a gray pixel.
    // The Kinect can detect up to 6 people in front of the sensor, but at most 2 of them can be
    // actively tracked (skeleton tracking) at the same time.
    switch (player)
    {
    case 0:
        color_data.blue = depth / 2;
        color_data.green = depth / 2;
        color_data.red = depth / 2;
        break;
    case 1:
        color_data.blue = depth;
        break;
    case 2:
        color_data.green = depth;
        break;
    case 3:
        color_data.red = depth;
        break;
    case 4:
        color_data.blue = depth;
        color_data.green = depth;
        color_data.red = depth / 4;
        break;
    case 5:
        color_data.blue = depth;
        color_data.green = depth / 4;
        color_data.red = depth;
        break;
    case 6:
        color_data.blue = depth / 4;
        color_data.green = depth;
        color_data.red = depth;
        break;
    }

    return color_data;
}

void getDepthImage(HANDLE & depthStreamHandle, cv::Mat & depthImg, cv::Mat & mask)
{
    const NUI_IMAGE_FRAME * pImageFrame = NULL;

    HRESULT hr = NuiImageStreamGetNextFrame(depthStreamHandle, 0, &pImageFrame);
    if (FAILED(hr))
    {
        cout << "Could not get depth image" << endl;
        NuiShutdown();
        return;
    }

    INuiFrameTexture * pTexture = pImageFrame->pFrameTexture;
    NUI_LOCKED_RECT LockedRect;
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    if (LockedRect.Pitch != 0)
    {
        for (int i = 0; i < depthImg.rows; i++)
        {
            uchar * ptr = depthImg.ptr<uchar>(i);
            uchar * ptr_mask = mask.ptr<uchar>(i);
            uchar * pBufferRun = (uchar*)(LockedRect.pBits) + i * LockedRect.Pitch;
            USHORT * pBuffer = (USHORT*)pBufferRun;

            for (int j = 0; j < depthImg.cols; j++)
            {
                // ptr[j] = 255 - (uchar)(255 * pBuffer[j] / 0x0fff);   // normalize the raw value directly
                // ptr[j] = (uchar)(255 * pBuffer[j] / 0x0fff);         // normalize the raw value directly
                BGR rgb = Depth2RGB(pBuffer[j]);
                ptr[3 * j] = rgb.blue;
                ptr[3 * j + 1] = rgb.green;
                ptr[3 * j + 2] = rgb.red;

                // Build a per-player mask: each player ID gets a solid color, the background stays black.
                switch (rgb.player)
                {
                case 0:
                    ptr_mask[3 * j] = 0;
                    ptr_mask[3 * j + 1] = 0;
                    ptr_mask[3 * j + 2] = 0;
                    break;
                case 1:
                    ptr_mask[3 * j] = 255;
                    ptr_mask[3 * j + 1] = 0;
                    ptr_mask[3 * j + 2] = 0;
                    break;
                case 2:
                    ptr_mask[3 * j] = 0;
                    ptr_mask[3 * j + 1] = 255;
                    ptr_mask[3 * j + 2] = 0;
                    break;
                case 3:
                    ptr_mask[3 * j] = 0;
                    ptr_mask[3 * j + 1] = 0;
                    ptr_mask[3 * j + 2] = 255;
                    break;
                case 4:
                    ptr_mask[3 * j] = 255;
                    ptr_mask[3 * j + 1] = 255;
                    ptr_mask[3 * j + 2] = 0;
                    break;
                case 5:
                    ptr_mask[3 * j] = 255;
                    ptr_mask[3 * j + 1] = 0;
                    ptr_mask[3 * j + 2] = 255;
                    break;
                case 6:
                    ptr_mask[3 * j] = 0;
                    ptr_mask[3 * j + 1] = 255;
                    ptr_mask[3 * j + 2] = 255;
                    break;
                default:
                    ptr_mask[3 * j] = 0;
                    ptr_mask[3 * j + 1] = 0;
                    ptr_mask[3 * j + 2] = 0;
                    break;
                }
            }
        }
    }
    else
    {
        cout << "Failed to capture the depth image" << endl;
    }

    pTexture->UnlockRect(0);
    NuiImageStreamReleaseFrame(depthStreamHandle, pImageFrame);
}

// Draw the bones of one tracked skeleton. Each skeleton index gets its own color, and a bone is
// drawn only when both of its end points have valid (non-zero) coordinates.
void drawSkeleton(cv::Mat &img, cv::Point pointSet[], int which_one)
{
    cv::Scalar color;
    switch (which_one)
    {
    case 0:
        color = cv::Scalar(255, 0, 0);
        break;
    case 1:
        color = cv::Scalar(0, 255, 0);
        break;
    case 2:
        color = cv::Scalar(0, 0, 255);
        break;
    case 3:
        color = cv::Scalar(255, 255, 0);
        break;
    case 4:
        color = cv::Scalar(255, 0, 255);
        break;
    case 5:
        color = cv::Scalar(0, 255, 255);
        break;
    }

    // Helper: draw one bone if neither end point is still at the default (0, 0).
    auto drawBone = [&](int a, int b)
    {
        if ((pointSet[a].x != 0 || pointSet[a].y != 0) &&
            (pointSet[b].x != 0 || pointSet[b].y != 0))
            cv::line(img, pointSet[a], pointSet[b], color, 2);
    };

    // Spine
    drawBone(NUI_SKELETON_POSITION_HEAD, NUI_SKELETON_POSITION_SHOULDER_CENTER);
    drawBone(NUI_SKELETON_POSITION_SHOULDER_CENTER, NUI_SKELETON_POSITION_SPINE);
    drawBone(NUI_SKELETON_POSITION_SPINE, NUI_SKELETON_POSITION_HIP_CENTER);

    // Left arm
    drawBone(NUI_SKELETON_POSITION_SHOULDER_CENTER, NUI_SKELETON_POSITION_SHOULDER_LEFT);
    drawBone(NUI_SKELETON_POSITION_SHOULDER_LEFT, NUI_SKELETON_POSITION_ELBOW_LEFT);
    drawBone(NUI_SKELETON_POSITION_ELBOW_LEFT, NUI_SKELETON_POSITION_WRIST_LEFT);
    drawBone(NUI_SKELETON_POSITION_WRIST_LEFT, NUI_SKELETON_POSITION_HAND_LEFT);

    // Right arm
    drawBone(NUI_SKELETON_POSITION_SHOULDER_CENTER, NUI_SKELETON_POSITION_SHOULDER_RIGHT);
    drawBone(NUI_SKELETON_POSITION_SHOULDER_RIGHT, NUI_SKELETON_POSITION_ELBOW_RIGHT);
    drawBone(NUI_SKELETON_POSITION_ELBOW_RIGHT, NUI_SKELETON_POSITION_WRIST_RIGHT);
    drawBone(NUI_SKELETON_POSITION_WRIST_RIGHT, NUI_SKELETON_POSITION_HAND_RIGHT);

    // Left leg
    drawBone(NUI_SKELETON_POSITION_HIP_CENTER, NUI_SKELETON_POSITION_HIP_LEFT);
    drawBone(NUI_SKELETON_POSITION_HIP_LEFT, NUI_SKELETON_POSITION_KNEE_LEFT);
    drawBone(NUI_SKELETON_POSITION_KNEE_LEFT, NUI_SKELETON_POSITION_ANKLE_LEFT);
    drawBone(NUI_SKELETON_POSITION_ANKLE_LEFT, NUI_SKELETON_POSITION_FOOT_LEFT);

    // Right leg
    drawBone(NUI_SKELETON_POSITION_HIP_CENTER, NUI_SKELETON_POSITION_HIP_RIGHT);
    drawBone(NUI_SKELETON_POSITION_HIP_RIGHT, NUI_SKELETON_POSITION_KNEE_RIGHT);
    drawBone(NUI_SKELETON_POSITION_KNEE_RIGHT, NUI_SKELETON_POSITION_ANKLE_RIGHT);
    drawBone(NUI_SKELETON_POSITION_ANKLE_RIGHT, NUI_SKELETON_POSITION_FOOT_RIGHT);
}

void getSkeletonImage(cv::Mat & skeletonImg, cv::Mat & colorImg)
{
    NUI_SKELETON_FRAME skeletonFrame = { 0 };   // the skeleton frame
    bool foundSkeleton = false;

    HRESULT hr = NuiSkeletonGetNextFrame(0, &skeletonFrame);
    if (SUCCEEDED(hr))
    {
        // NUI_SKELETON_COUNT is the number of skeleton slots (i.e. the number of people that can be detected).
        for (int i = 0; i < NUI_SKELETON_COUNT; i++)
        {
            NUI_SKELETON_TRACKING_STATE trackingState = skeletonFrame.SkeletonData[i].eTrackingState;

            // The Kinect can detect up to 6 people but only track the skeletons of 2 of them,
            // so check whether this slot is actually being tracked.
            if (trackingState == NUI_SKELETON_TRACKED)
            {
                foundSkeleton = true;
            }
        }
    }

    if (!foundSkeleton)
    {
        return;
    }

    NuiTransformSmooth(&skeletonFrame, NULL);
    skeletonImg.setTo(0);

    for (int i = 0; i < NUI_SKELETON_COUNT; i++)
    {
        // A skeleton is considered valid only if it is tracked and its shoulder center (the neck area) is tracked.
        if (skeletonFrame.SkeletonData[i].eTrackingState == NUI_SKELETON_TRACKED &&
            skeletonFrame.SkeletonData[i].eSkeletonPositionTrackingState[NUI_SKELETON_POSITION_SHOULDER_CENTER] != NUI_SKELETON_POSITION_NOT_TRACKED)
        {
            float fx, fy;

            // Get the coordinates of all tracked joints and convert them into depth-space
            // coordinates, because the joints are marked on the depth-sized image.
            // NUI_SKELETON_POSITION_COUNT is the number of joints per tracked skeleton, i.e. 20.
            for (int j = 0; j < NUI_SKELETON_POSITION_COUNT; j++)
            {
                NuiTransformSkeletonToDepthImage(skeletonFrame.SkeletonData[i].SkeletonPositions[j], &fx, &fy);
                skeletonPoint[i][j].x = (int)fx;
                skeletonPoint[i][j].y = (int)fy;
            }

            for (int j = 0; j < NUI_SKELETON_POSITION_COUNT; j++)
            {
                if (skeletonFrame.SkeletonData[i].eSkeletonPositionTrackingState[j] != NUI_SKELETON_POSITION_NOT_TRACKED)
                {
                    cv::circle(skeletonImg, skeletonPoint[i][j], 3, cv::Scalar(0, 255, 255), 1, 8, 0);
                    tracked[i] = true;

                    // Also draw the joint on the color image: map the depth-space coordinates
                    // to color-space coordinates first.
                    LONG color_x, color_y;
                    NuiImageGetColorPixelCoordinatesFromDepthPixel(NUI_IMAGE_RESOLUTION_640x480, 0,
                        skeletonPoint[i][j].x, skeletonPoint[i][j].y, 0, &color_x, &color_y);
                    colorPoint[i][j].x = (int)color_x;
                    colorPoint[i][j].y = (int)color_y;
                    cv::circle(colorImg, colorPoint[i][j], 4, cv::Scalar(0, 255, 255), 1, 8, 0);
                }
            }

            drawSkeleton(skeletonImg, skeletonPoint[i], i);
            drawSkeleton(colorImg, colorPoint[i], i);
        }
    }
}
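To build the sample, the Kinect for Windows SDK v1.x headers and import library, plus OpenCV, need to be on the compiler's include and library paths. Below is a minimal sketch of how the libraries might be pulled in from MSVC source code; the OpenCV library names are placeholders for whatever version is installed and are not part of the original post.

// Build sketch (an assumption, not from the original post): the Kinect SDK v1.x installer defines
// the KINECTSDK10_DIR environment variable, so add %KINECTSDK10_DIR%\inc to the include path and
// %KINECTSDK10_DIR%\lib\x86 (or \amd64) to the library path, along with your OpenCV directories.
// With MSVC the import libraries can then be requested directly from the source file:
#pragma comment(lib, "Kinect10.lib")            // Kinect for Windows SDK v1.x
#pragma comment(lib, "opencv_core2413.lib")     // hypothetical OpenCV 2.4.13 names; match your install
#pragma comment(lib, "opencv_highgui2413.lib")
#pragma comment(lib, "opencv_imgproc2413.lib")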

Results

(The result screenshots include a photo of my classmate, so they have been pixelated.)

References

  1. https://blog.csdn.net/zouxy09/article/details/8163265
  2. https://blog.csdn.net/timebomb/article/details/7169372
