Let's start with SingleFace.cpp.

The initialization function InitInstance ends by returning:

    return SUCCEEDED(m_FTHelper.Init(m_hWnd,
        FTHelperCallingBack,
        this,
        m_depthType,
        m_depthRes,
        m_bNearMode,
        TRUE, // if near mode doesn't work, fall back to default mode
        m_colorType,
        m_colorRes,
        m_bSeatedSkeletonMode));

The callback function looks like this. It is invoked later from SubmitFraceTrackingResult and mainly hands the tracking results (animation units and 3D pose) to the egg avatar:

    /*
    * The "Face Tracker" helper class is generic. It will call back this function
    * after a face has been successfully tracked. The code in the call back passes the parameters
    * to the Egg Avatar, so it can be animated.
    */
    void SingleFace::FTHelperCallingBack(PVOID pVoid)
    {
        SingleFace* pApp = reinterpret_cast<SingleFace*>(pVoid);
        if (pApp)
        {
            IFTResult* pResult = pApp->m_FTHelper.GetResult();
            if (pResult && SUCCEEDED(pResult->GetStatus()))
            {
                FLOAT* pAU = NULL;
                UINT numAU;
                pResult->GetAUCoefficients(&pAU, &numAU);
                pApp->m_eggavatar.SetCandideAU(pAU, numAU);

                FLOAT scale;
                FLOAT rotationXYZ[3];
                FLOAT translationXYZ[3];
                pResult->Get3DPose(&scale, rotationXYZ, translationXYZ);
                pApp->m_eggavatar.SetTranslations(translationXYZ[0], translationXYZ[1], translationXYZ[2]);
                pApp->m_eggavatar.SetRotations(rotationXYZ[0], rotationXYZ[1], rotationXYZ[2]);
            }
        }
    }
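
Because the helper is generic, anything matching the FTHelperCallBack signature can be passed to FTHelper::Init as the callBack argument. As a minimal sketch (MyApp and LogPoseCallback are hypothetical names invented for illustration, not part of the sample; it assumes the sample's FTHelper.h is included), a callback that merely logs the head pose instead of animating the avatar could look like this:

    // Hypothetical example: a callback with the same shape as FTHelperCallingBack
    // that just logs the 3D head pose. MyApp stands in for SingleFace and is
    // assumed to expose an FTHelper member named m_FTHelper.
    static void LogPoseCallback(PVOID pVoid)
    {
        MyApp* pApp = reinterpret_cast<MyApp*>(pVoid);
        if (!pApp)
        {
            return;
        }

        IFTResult* pResult = pApp->m_FTHelper.GetResult();
        if (pResult && SUCCEEDED(pResult->GetStatus()))
        {
            FLOAT scale;
            FLOAT rotationXYZ[3];
            FLOAT translationXYZ[3];
            if (SUCCEEDED(pResult->Get3DPose(&scale, rotationXYZ, translationXYZ)))
            {
                // rotationXYZ is rotation about X/Y/Z, i.e. pitch/yaw/roll.
                WCHAR msg[128];
                swprintf_s(msg, L"pitch=%.1f yaw=%.1f roll=%.1f\n",
                           rotationXYZ[0], rotationXYZ[1], rotationXYZ[2]);
                OutputDebugStringW(msg);
            }
        }
    }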

Inside the helper's Init function, a worker thread is created:

    HRESULT FTHelper::Init(HWND hWnd, FTHelperCallBack callBack, PVOID callBackParam, NUI_IMAGE_TYPE depthType, NUI_IMAGE_RESOLUTION depthRes, BOOL bNearMode, BOOL bFallbackToDefault, NUI_IMAGE_TYPE colorType, NUI_IMAGE_RESOLUTION colorRes, BOOL bSeatedSkeletonMode)
    {
        if (!hWnd || !callBack)
        {
            return E_INVALIDARG;
        }
        m_hWnd = hWnd;
        m_CallBack = callBack;
        m_CallBackParam = callBackParam;
        m_ApplicationIsRunning = true;
        m_depthType = depthType;
        m_depthRes = depthRes;
        m_bNearMode = bNearMode;
        m_bFallbackToDefault = bFallbackToDefault;
        m_bSeatedSkeletonMode = bSeatedSkeletonMode;
        m_colorType = colorType;
        m_colorRes = colorRes;
        m_hFaceTrackingThread = CreateThread(NULL, 0, FaceTrackingStaticThread, (PVOID)this, 0, 0);
        return S_OK;
    }

    DWORD WINAPI FTHelper::FaceTrackingStaticThread(PVOID lpParam)
    {
        FTHelper* context = static_cast<FTHelper*>(lpParam);
        if (context)
        {
            return context->FaceTrackingThread();
        }
        return 0;
    }
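
For completeness, shutting the helper down is just the mirror image of this: clear m_ApplicationIsRunning so the loop in the thread falls through, then wait for the thread to exit. The sample wraps this in a Stop method; the body below is a rough sketch of that idea, not a verbatim copy of the SDK code:

    // Minimal sketch of the shutdown path (an approximation, not copied from the SDK):
    // clear the flag polled by FaceTrackingThread, then wait for the thread to finish.
    void FTHelper::Stop()
    {
        m_ApplicationIsRunning = false;                       // makes the while-loop below exit
        if (m_hFaceTrackingThread)
        {
            WaitForSingleObject(m_hFaceTrackingThread, 1000); // give the thread time to clean up
            CloseHandle(m_hFaceTrackingThread);
            m_hFaceTrackingThread = NULL;
        }
    }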

The thread function first initializes the Kinect sensor and the face tracker, then runs the tracking loop:

    DWORD WINAPI FTHelper::FaceTrackingThread()
    {
        FT_CAMERA_CONFIG videoConfig;
        FT_CAMERA_CONFIG depthConfig;
        FT_CAMERA_CONFIG* pDepthConfig = NULL;

        // Try to get the Kinect camera to work
        HRESULT hr = m_KinectSensor.Init(m_depthType, m_depthRes, m_bNearMode, m_bFallbackToDefault, m_colorType, m_colorRes, m_bSeatedSkeletonMode);
        if (SUCCEEDED(hr))
        {
            m_KinectSensorPresent = TRUE;
            m_KinectSensor.GetVideoConfiguration(&videoConfig);
            m_KinectSensor.GetDepthConfiguration(&depthConfig);
            pDepthConfig = &depthConfig;
            m_hint3D[0] = m_hint3D[1] = FT_VECTOR3D(0, 0, 0);
        }
        else
        {
            m_KinectSensorPresent = FALSE;
            WCHAR errorText[MAX_PATH];
            ZeroMemory(errorText, sizeof(WCHAR) * MAX_PATH);
            wsprintf(errorText, L"Could not initialize the Kinect sensor. hr=0x%x\n", hr);
            MessageBoxW(m_hWnd, errorText, L"Face Tracker Initialization Error\n", MB_OK);
            return 1;
        }

        // Try to start the face tracker.
        m_pFaceTracker = FTCreateFaceTracker(_opt);
        if (!m_pFaceTracker)
        {
            MessageBoxW(m_hWnd, L"Could not create the face tracker.\n", L"Face Tracker Initialization Error\n", MB_OK);
            return 2;
        }

        hr = m_pFaceTracker->Initialize(&videoConfig, pDepthConfig, NULL, NULL);
        if (FAILED(hr))
        {
            WCHAR path[512], buffer[1024];
            GetCurrentDirectoryW(ARRAYSIZE(path), path);
            wsprintf(buffer, L"Could not initialize face tracker (%s). hr=0x%x", path, hr);
            MessageBoxW(m_hWnd, /*L"Could not initialize the face tracker.\n"*/ buffer, L"Face Tracker Initialization Error\n", MB_OK);
            return 3;
        }

        hr = m_pFaceTracker->CreateFTResult(&m_pFTResult);
        if (FAILED(hr) || !m_pFTResult)
        {
            MessageBoxW(m_hWnd, L"Could not initialize the face tracker result.\n", L"Face Tracker Initialization Error\n", MB_OK);
            return 4;
        }

        // Initialize the RGB image.
        m_colorImage = FTCreateImage();
        if (!m_colorImage || FAILED(hr = m_colorImage->Allocate(videoConfig.Width, videoConfig.Height, FTIMAGEFORMAT_UINT8_B8G8R8X8)))
        {
            return 5;
        }

        if (pDepthConfig)
        {
            m_depthImage = FTCreateImage();
            if (!m_depthImage || FAILED(hr = m_depthImage->Allocate(depthConfig.Width, depthConfig.Height, FTIMAGEFORMAT_UINT16_D13P3)))
            {
                return 6;
            }
        }

        SetCenterOfImage(NULL);
        m_LastTrackSucceeded = false;

        while (m_ApplicationIsRunning)
        {
            CheckCameraInput();
            InvalidateRect(m_hWnd, NULL, FALSE);
            UpdateWindow(m_hWnd);
            Sleep(16);
        }

        m_pFaceTracker->Release();
        m_pFaceTracker = NULL;

        if (m_colorImage)
        {
            m_colorImage->Release();
            m_colorImage = NULL;
        }

        if (m_depthImage)
        {
            m_depthImage->Release();
            m_depthImage = NULL;
        }

        if (m_pFTResult)
        {
            m_pFTResult->Release();
            m_pFTResult = NULL;
        }

        m_KinectSensor.Release();
        return 0;
    }

The thread spends its life in this loop, processing camera input and forcing a repaint roughly every 16 ms (about 60 frames per second):

    while (m_ApplicationIsRunning)
    {
        CheckCameraInput();
        InvalidateRect(m_hWnd, NULL, FALSE);
        UpdateWindow(m_hWnd);
        Sleep(16);
    }

CheckCameraInput then handles each frame: it copies the latest color and depth buffers, and either starts tracking from scratch or continues tracking from the previous result, depending on whether the last frame was tracked successfully:

    // Get a video image and process it.
    void FTHelper::CheckCameraInput()
    {
        HRESULT hrFT = E_FAIL;

        if (m_KinectSensorPresent && m_KinectSensor.GetVideoBuffer())
        {
            HRESULT hrCopy = m_KinectSensor.GetVideoBuffer()->CopyTo(m_colorImage, NULL, 0, 0);
            if (SUCCEEDED(hrCopy) && m_KinectSensor.GetDepthBuffer())
            {
                hrCopy = m_KinectSensor.GetDepthBuffer()->CopyTo(m_depthImage, NULL, 0, 0);
            }
            // Do face tracking
            if (SUCCEEDED(hrCopy))
            {
                FT_SENSOR_DATA sensorData(m_colorImage, m_depthImage, m_KinectSensor.GetZoomFactor(), m_KinectSensor.GetViewOffSet());

                FT_VECTOR3D* hint = NULL;
                if (SUCCEEDED(m_KinectSensor.GetClosestHint(m_hint3D)))
                {
                    hint = m_hint3D;
                }

                if (m_LastTrackSucceeded)
                {
                    hrFT = m_pFaceTracker->ContinueTracking(&sensorData, hint, m_pFTResult);
                }
                else
                {
                    hrFT = m_pFaceTracker->StartTracking(&sensorData, NULL, hint, m_pFTResult);
                }
            }
        }

        m_LastTrackSucceeded = SUCCEEDED(hrFT) && SUCCEEDED(m_pFTResult->GetStatus());

        if (m_LastTrackSucceeded)
        {
            SubmitFraceTrackingResult(m_pFTResult);
        }
        else
        {
            m_pFTResult->Reset();
        }

        SetCenterOfImage(m_pFTResult);
    }
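
The key detail is the two tracking modes: StartTracking runs full face detection over the frame and is comparatively expensive, while ContinueTracking refines the pose from the previous frame and is cheap. Stripped of the Kinect plumbing, the per-frame decision looks roughly like this (TrackOneFrame is a hypothetical helper written for illustration; the SDK calls are the same ones used above):

    // Sketch of the per-frame tracking pattern used in CheckCameraInput.
    HRESULT TrackOneFrame(IFTFaceTracker* pTracker, IFTResult* pResult,
                          const FT_SENSOR_DATA& sensorData, FT_VECTOR3D* hint,
                          bool& lastTrackSucceeded)
    {
        HRESULT hr;
        if (lastTrackSucceeded)
        {
            // Cheap: refine the pose starting from the previous frame's result.
            hr = pTracker->ContinueTracking(&sensorData, hint, pResult);
        }
        else
        {
            // Expensive: run face detection over the whole frame.
            hr = pTracker->StartTracking(&sensorData, NULL, hint, pResult);
        }
        lastTrackSucceeded = SUCCEEDED(hr) && SUCCEEDED(pResult->GetStatus());
        return hr;
    }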

SubmitFraceTrackingResult invokes the registered callback with the tracking result and, if the mask is enabled, draws the face mesh and the face rectangle onto the color image:

    BOOL FTHelper::SubmitFraceTrackingResult(IFTResult* pResult)
    {
        if (pResult != NULL && SUCCEEDED(pResult->GetStatus()))
        {
            if (m_CallBack)
            {
                (*m_CallBack)(m_CallBackParam); // the callback registered in Init runs here
            }

            if (m_DrawMask)
            {
                FLOAT* pSU = NULL;
                UINT numSU;
                BOOL suConverged;
                m_pFaceTracker->GetShapeUnits(NULL, &pSU, &numSU, &suConverged);
                POINT viewOffset = {0, 0};
                FT_CAMERA_CONFIG cameraConfig;
                if (m_KinectSensorPresent)
                {
                    m_KinectSensor.GetVideoConfiguration(&cameraConfig);
                }
                else
                {
                    cameraConfig.Width = 640;
                    cameraConfig.Height = 480;
                    cameraConfig.FocalLength = 500.0f;
                }
                IFTModel* ftModel;
                HRESULT hr = m_pFaceTracker->GetFaceModel(&ftModel);
                if (SUCCEEDED(hr))
                {
                    hr = VisualizeFaceModel(m_colorImage, ftModel, &cameraConfig, pSU, 1.0, viewOffset, pResult, 0x00FFFF00);
                    ftModel->Release();
                }
            }
        }
        return TRUE;
    }
    HRESULT VisualizeFaceModel(IFTImage* pColorImg, IFTModel* pModel, FT_CAMERA_CONFIG const* pCameraConfig, FLOAT const* pSUCoef,
        FLOAT zoomFactor, POINT viewOffset, IFTResult* pAAMRlt, UINT32 color)
    {
        if (!pColorImg || !pModel || !pCameraConfig || !pSUCoef || !pAAMRlt)
        {
            return E_POINTER;
        }

        HRESULT hr = S_OK;
        UINT vertexCount = pModel->GetVertexCount();
        FT_VECTOR2D* pPts2D = reinterpret_cast<FT_VECTOR2D*>(_malloca(sizeof(FT_VECTOR2D) * vertexCount));
        if (pPts2D)
        {
            FLOAT *pAUs;
            UINT auCount;
            hr = pAAMRlt->GetAUCoefficients(&pAUs, &auCount);
            if (SUCCEEDED(hr))
            {
                FLOAT scale, rotationXYZ[3], translationXYZ[3];
                hr = pAAMRlt->Get3DPose(&scale, rotationXYZ, translationXYZ);
                if (SUCCEEDED(hr))
                {
                    hr = pModel->GetProjectedShape(pCameraConfig, zoomFactor, viewOffset, pSUCoef, pModel->GetSUCount(), pAUs, auCount,
                        scale, rotationXYZ, translationXYZ, pPts2D, vertexCount);
                    if (SUCCEEDED(hr))
                    {
                        POINT* p3DMdl = reinterpret_cast<POINT*>(_malloca(sizeof(POINT) * vertexCount));
                        if (p3DMdl)
                        {
                            for (UINT i = 0; i < vertexCount; ++i)
                            {
                                p3DMdl[i].x = LONG(pPts2D[i].x + 0.5f);
                                p3DMdl[i].y = LONG(pPts2D[i].y + 0.5f);
                            }

                            FT_TRIANGLE* pTriangles;
                            UINT triangleCount;
                            hr = pModel->GetTriangles(&pTriangles, &triangleCount);
                            if (SUCCEEDED(hr))
                            {
                                struct EdgeHashTable
                                {
                                    UINT32* pEdges;
                                    UINT edgesAlloc;

                                    void Insert(int a, int b)
                                    {
                                        UINT32 v = (min(a, b) << 16) | max(a, b);
                                        UINT32 index = (v + (v << 8)) * 49157, i;
                                        for (i = 0; i < edgesAlloc - 1 && pEdges[(index + i) & (edgesAlloc - 1)] && v != pEdges[(index + i) & (edgesAlloc - 1)]; ++i)
                                        {
                                        }
                                        pEdges[(index + i) & (edgesAlloc - 1)] = v;
                                    }
                                } eht;

                                eht.edgesAlloc = 1 << UINT(log(2.f * (1 + vertexCount + triangleCount)) / log(2.f));
                                eht.pEdges = reinterpret_cast<UINT32*>(_malloca(sizeof(UINT32) * eht.edgesAlloc));
                                if (eht.pEdges)
                                {
                                    ZeroMemory(eht.pEdges, sizeof(UINT32) * eht.edgesAlloc);
                                    for (UINT i = 0; i < triangleCount; ++i)
                                    {
                                        eht.Insert(pTriangles[i].i, pTriangles[i].j);
                                        eht.Insert(pTriangles[i].j, pTriangles[i].k);
                                        eht.Insert(pTriangles[i].k, pTriangles[i].i);
                                    }
                                    for (UINT i = 0; i < eht.edgesAlloc; ++i)
                                    {
                                        if (eht.pEdges[i] != 0)
                                        {
                                            pColorImg->DrawLine(p3DMdl[eht.pEdges[i] >> 16], p3DMdl[eht.pEdges[i] & 0xFFFF], color, 1);
                                        }
                                    }
                                    _freea(eht.pEdges);
                                }

                                // Render the face rect in magenta
                                RECT rectFace;
                                hr = pAAMRlt->GetFaceRect(&rectFace);
                                if (SUCCEEDED(hr))
                                {
                                    POINT leftTop = {rectFace.left, rectFace.top};
                                    POINT rightTop = {rectFace.right - 1, rectFace.top};
                                    POINT leftBottom = {rectFace.left, rectFace.bottom - 1};
                                    POINT rightBottom = {rectFace.right - 1, rectFace.bottom - 1};
                                    UINT32 nColor = 0xff00ff;
                                    SUCCEEDED(hr = pColorImg->DrawLine(leftTop, rightTop, nColor, 1)) &&
                                        SUCCEEDED(hr = pColorImg->DrawLine(rightTop, rightBottom, nColor, 1)) &&
                                        SUCCEEDED(hr = pColorImg->DrawLine(rightBottom, leftBottom, nColor, 1)) &&
                                        SUCCEEDED(hr = pColorImg->DrawLine(leftBottom, leftTop, nColor, 1));
                                }
                            }
                            _freea(p3DMdl);
                        }
                        else
                        {
                            hr = E_OUTOFMEMORY;
                        }
                    }
                }
            }
            _freea(pPts2D);
        }
        else
        {
            hr = E_OUTOFMEMORY;
        }
        return hr;
    }
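
The only non-obvious part of VisualizeFaceModel is the little edge hash table: every mesh edge is packed into a single UINT32 key with the smaller vertex index in the high 16 bits, so the edge shared by two triangles collapses to one entry and each line is drawn exactly once. A standalone illustration of the packing and unpacking (PackEdge/UnpackEdge are hypothetical helper names, not from the sample):

    #include <algorithm>
    #include <cstdint>

    // Pack an undirected edge (a, b) into one 32-bit key: smaller index in the
    // high 16 bits, larger index in the low 16 bits. (a, b) and (b, a) map to
    // the same key, so a shared triangle edge is stored -- and drawn -- only once.
    uint32_t PackEdge(uint16_t a, uint16_t b)
    {
        return (uint32_t(std::min(a, b)) << 16) | std::max(a, b);
    }

    // Recover the two vertex indices, exactly as the draw loop does with
    // (key >> 16) and (key & 0xFFFF).
    void UnpackEdge(uint32_t key, uint16_t& a, uint16_t& b)
    {
        a = uint16_t(key >> 16);
        b = uint16_t(key & 0xFFFF);
    }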

Back in the main window procedure, the WM_PAINT handler drives the drawing:

    case WM_PAINT:
        hdc = BeginPaint(hWnd, &ps);
        // Draw the avatar window and the video window
        PaintWindow(hdc, hWnd);
        EndPaint(hWnd, &ps);
        break;

The paint function splits the client area in half: the camera video goes on the right and the egg avatar on the left.

    // Draw the egg head and the camera video with the mask superimposed.
    BOOL SingleFace::PaintWindow(HDC hdc, HWND hWnd)
    {
        static int errCount = 0;
        BOOL ret = FALSE;
        RECT rect;
        GetClientRect(hWnd, &rect);
        int width = rect.right - rect.left;
        int height = rect.bottom - rect.top;
        int halfWidth = width / 2;

        // Show the video on the right of the window
        errCount += !ShowVideo(hdc, width - halfWidth, height, halfWidth, 0);
        // Draw the egg avatar on the left of the window
        errCount += !ShowEggAvatar(hdc, halfWidth, height, 0, 0);

        return ret;
    }

ShowVideo calls m_FTHelper.GetColorImage() to get the color frame on which the helper has already drawn the mesh and face rectangle, crops it around the face center, and stretches it into the target rectangle.

    // Drawing the video window
    BOOL SingleFace::ShowVideo(HDC hdc, int width, int height, int originX, int originY)
    {
        BOOL ret = TRUE;

        // Now, copy a fraction of the camera image into the screen.
        IFTImage* colorImage = m_FTHelper.GetColorImage();
        if (colorImage)
        {
            int iWidth = colorImage->GetWidth();
            int iHeight = colorImage->GetHeight();
            if (iWidth > 0 && iHeight > 0)
            {
                int iTop = 0;
                int iBottom = iHeight;
                int iLeft = 0;
                int iRight = iWidth;

                // Keep a separate buffer.
                if (m_pVideoBuffer && SUCCEEDED(m_pVideoBuffer->Allocate(iWidth, iHeight, FTIMAGEFORMAT_UINT8_B8G8R8A8)))
                {
                    // Copy to the video buffer while converting bytes
                    colorImage->CopyTo(m_pVideoBuffer, NULL, 0, 0);

                    // Compute the best approximate copy ratio.
                    float w1 = (float)iHeight * (float)width;
                    float w2 = (float)iWidth * (float)height;
                    if (w2 > w1 && height > 0)
                    {
                        // video image too wide
                        float wx = w1 / height;
                        iLeft = (int)max(0, m_FTHelper.GetXCenterFace() - wx / 2);
                        iRight = iLeft + (int)wx;
                        if (iRight > iWidth)
                        {
                            iRight = iWidth;
                            iLeft = iRight - (int)wx;
                        }
                    }
                    else if (w1 > w2 && width > 0)
                    {
                        // video image too narrow
                        float hy = w2 / width;
                        iTop = (int)max(0, m_FTHelper.GetYCenterFace() - hy / 2);
                        iBottom = iTop + (int)hy;
                        if (iBottom > iHeight)
                        {
                            iBottom = iHeight;
                            iTop = iBottom - (int)hy;
                        }
                    }

                    int const bmpPixSize = m_pVideoBuffer->GetBytesPerPixel();
                    SetStretchBltMode(hdc, HALFTONE);
                    BITMAPINFO bmi = {sizeof(BITMAPINFO), iWidth, iHeight, 1, static_cast<WORD>(bmpPixSize * CHAR_BIT), BI_RGB,
                        m_pVideoBuffer->GetStride() * iHeight, 5000, 5000, 0, 0};
                    if (0 == StretchDIBits(hdc, originX, originY, width, height,
                        iLeft, iBottom, iRight - iLeft, iTop - iBottom, m_pVideoBuffer->GetBuffer(), &bmi, DIB_RGB_COLORS, SRCCOPY))
                    {
                        ret = FALSE;
                    }
                }
            }
        }
        return ret;
    }
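
The cropping math is easier to follow with concrete numbers. Assume (made-up values for illustration) a 640x480 camera image shown in a 400x400 target rectangle, with the face centered at x = 500: then w1 = 480 * 400 = 192000 and w2 = 640 * 400 = 256000, so w2 > w1 (the video is too wide for the target) and a 480-pixel-wide strip around the face is cut out and stretched to fit. A tiny sketch of that computation:

    #include <algorithm>
    #include <cstdio>

    // Worked example of ShowVideo's horizontal-crop decision with made-up numbers.
    int main()
    {
        const float iWidth = 640.0f, iHeight = 480.0f;  // camera image
        const float width = 400.0f, height = 400.0f;    // target rectangle
        const float xCenterFace = 500.0f;               // hypothetical face center (pixels)

        float w1 = iHeight * width;   // 480 * 400 = 192000
        float w2 = iWidth * height;   // 640 * 400 = 256000

        // w2 > w1: the video is too wide, so crop a strip of width w1/height
        // centered on the face, clamped to the image bounds.
        float wx = w1 / height;                            // 480
        float left = std::max(0.0f, xCenterFace - wx / 2); // 500 - 240 = 260
        float right = left + wx;                           // 740
        if (right > iWidth)                                // 740 > 640: shift the window back
        {
            right = iWidth;                                // 640
            left = right - wx;                             // 160
        }

        std::printf("crop x range: [%g, %g)\n", left, right); // [160, 640)
        return 0;
    }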

ShowEggAvatar calls m_eggavatar.DrawImage(m_pImageBuffer) to render the egg face; note that the avatar's parameters were already set in the callback shown earlier.

    // Drawing code
    BOOL SingleFace::ShowEggAvatar(HDC hdc, int width, int height, int originX, int originY)
    {
        static int errCount = 0;
        BOOL ret = FALSE;

        if (m_pImageBuffer && SUCCEEDED(m_pImageBuffer->Allocate(width, height, FTIMAGEFORMAT_UINT8_B8G8R8A8)))
        {
            memset(m_pImageBuffer->GetBuffer(), 0, m_pImageBuffer->GetStride() * height); // clear to black

            m_eggavatar.SetScaleAndTranslationToWindow(height, width);
            m_eggavatar.DrawImage(m_pImageBuffer);

            BITMAPINFO bmi = {sizeof(BITMAPINFO), width, height, 1, static_cast<WORD>(m_pImageBuffer->GetBytesPerPixel() * CHAR_BIT), BI_RGB,
                m_pImageBuffer->GetStride() * height, 5000, 5000, 0, 0};
            errCount += (0 == StretchDIBits(hdc, 0, 0, width, height, 0, 0, width, height, m_pImageBuffer->GetBuffer(), &bmi, DIB_RGB_COLORS, SRCCOPY));

            ret = TRUE;
        }
        return ret;
    }
