Importing the Kinect2, OpenCV, and PCL libraries

Kinect2 driver installation: https://blog.csdn.net/qq_15204179/article/details/107706758

Findfreenect2.cmake   [Kinect2]

# Find the freenect2 library
FIND_LIBRARY(freenect2_LIBRARY freenect2
        PATHS ~/freenect2/lib
        NO_DEFAULT_PATH)
SET(freenect2_LIBRARIES ${freenect2_LIBRARY} pthread)

# Find the include path
FIND_PATH(freenect2_INCLUDE_DIR libfreenect2/libfreenect2.hpp
        PATHS ~/freenect2/include
        NO_DEFAULT_PATH)
SET(freenect2_INCLUDE_DIRS ${freenect2_INCLUDE_DIR})

FindOpenCV.cmake   [OpenCV]


# once done, this will define
#
#  OpenCV_FOUND        -  whether there is OpenCV in the prebuilt libraries
#  OpenCV_INCLUDE_DIRS -  include directories for OpenCV
#  OpenCV_LIBRARY_DIRS -  library directories for OpenCV
#  OpenCV_LIBRARIES    -  link this to use OpenCV

set(OpenCV_DIR ~/3rdparty/OpenCV-3.4.9)
unset(OpenCV_FOUND)

MESSAGE(STATUS "OpenCV_DIR TEST  ${PREBUILT_DIR}")

set(OpenCV_INCLUDE_DIRS "${OpenCV_DIR}/include" "${OpenCV_DIR}/include/opencv")

set(OpenCV_LIB_COMPONENTS opencv_core;opencv_ml;opencv_calib3d;opencv_highgui;opencv_superres;opencv_photo;opencv_imgcodecs;opencv_features2d;opencv_viz;opencv_flann;opencv_shape;opencv_stitching;opencv_dnn;opencv_videostab;opencv_imgproc;opencv_objdetect;opencv_video;opencv_videoio;opencv_reg;opencv_aruco;opencv_dpm;opencv_xobjdetect;opencv_xfeatures2d;opencv_surface_matching;opencv_plot;opencv_hfs;opencv_tracking;opencv_rgbd;opencv_text;opencv_dnn_objdetect;opencv_face;opencv_optflow;opencv_bgsegm;opencv_bioinspired;opencv_ximgproc;opencv_saliency;opencv_freetype;opencv_stereo;opencv_img_hash;opencv_fuzzy;opencv_ccalib;opencv_line_descriptor;opencv_hdf;opencv_datasets;opencv_phase_unwrapping;opencv_xphoto;opencv_structured_light)

find_path(OpenCV_INCLUDE_DIRS NAMES opencv.h HINTS ${OpenCV_DIR}/include NO_SYSTEM_ENVIRONMENT_PATH)

set(OpenCV_LIBRARY_DIRS ${OpenCV_DIR}/lib)

FOREACH(cvcomponent ${OpenCV_LIB_COMPONENTS})
    find_library(lib_${cvcomponent} NAMES ${cvcomponent} HINTS ${OpenCV_DIR}/lib NO_DEFAULT_PATH)
    set(OpenCV_LIBRARIES ${OpenCV_LIBRARIES};${lib_${cvcomponent}})
ENDFOREACH()

set(OpenCV_LIBS ${OpenCV_LIBRARIES})
set(OpenCV_INCLUDE_DIRS ${OpenCV_INCLUDE_DIRS};${OpenCV_INCLUDE_DIRS}/opencv)

if (OpenCV_INCLUDE_DIRS AND OpenCV_LIBRARIES)
    set(OpenCV_FOUND TRUE)
endif (OpenCV_INCLUDE_DIRS AND OpenCV_LIBRARIES)

if (OpenCV_FOUND)
    if (NOT OpenCV_FIND_QUIETLY)
        message(STATUS "Found OpenCV: ${OpenCV_LIBRARIES}")
    endif (NOT OpenCV_FIND_QUIETLY)
else (OpenCV_FOUND)
    if (OpenCV_FIND_REQUIRED)
        message(FATAL_ERROR "Could not find the OpenCV library")
    endif ()
endif (OpenCV_FOUND)

CMakeLists.txt

cmake_minimum_required(VERSION 3.16)
project(kinect2demo)

set(CMAKE_CXX_STANDARD 14)

set(OUTPUT_DIRECTORY_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/build/debug)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${OUTPUT_DIRECTORY_ROOT}/bin" CACHE PATH "Runtime directory" FORCE)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${OUTPUT_DIRECTORY_ROOT}/lib" CACHE PATH "Library directory" FORCE)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${OUTPUT_DIRECTORY_ROOT}/lib" CACHE PATH "Archive directory" FORCE)

set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/cmake)
#set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ~/3rdparty/OpenCV-3.4.9/share/OpenCV)

# -------------------------------------------------- 2
find_package(freenect2 REQUIRED)
find_package(OpenCV REQUIRED)
find_package(PCL REQUIRED)

include_directories(${freenect2_INCLUDE_DIR})
include_directories(${OpenCV_INCLUDE_DIRS})
include_directories(${PCL_INCLUDE_DIRS})

link_directories(${OpenCV_LIBRARY_DIRS})
link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})

add_subdirectory(src)
add_subdirectory(src/camera)

add_executable(kinect2demo main.cpp)

------------------------------------------------------------------------------------------------------------------------------------------------

01-Kinect2Camera.cpp: capture and save color and depth images from the Kinect2

//
// Created by ty on 20-6-3.
//

#include <iostream>
#include <sstream>
#include <cmath>
#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/packet_pipeline.h>
#include <libfreenect2/frame_listener_impl.h>
#include <libfreenect2/registration.h>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

const int ACTION_ESC = 27;
const int ACTION_SPACE = 32;
// Output directory for the saved images
const string output_dir = "../output";

/**
 * Grab frames from the Kinect2 camera.
 *
 * in:
 *      Kinect2 device
 * out:
 *      color image
 *      depth image
 *
 *  - filter the depth values (drop infinite / NaN values)
 *  - flip horizontally
 *  - save the color and depth images
 *  - scale by 0.5 for display
 */
int main(int argc, char *argv[]) {
    // Create the libfreenect2 context
    libfreenect2::Freenect2 freenect2;
    if (freenect2.enumerateDevices() == 0) {
        std::cerr << "No Kinect2 device found" << std::endl;
        return -1;
    }
    const string &serial = freenect2.getDefaultDeviceSerialNumber();
    if (serial.empty()) {
        std::cerr << "Device serial number not available" << std::endl;
        return -1;
    }
    std::cout << "Device serial number: " << serial << std::endl;

    // Create the packet processing pipeline
//    libfreenect2::CpuPacketPipeline *pipeline = new libfreenect2::CpuPacketPipeline();
    libfreenect2::OpenGLPacketPipeline *pipeline = new libfreenect2::OpenGLPacketPipeline();

    // Open the device
    libfreenect2::Freenect2Device *device = freenect2.openDevice(serial, pipeline);

    // Create the frame listener and attach it to the device
    libfreenect2::SyncMultiFrameListener listener(libfreenect2::Frame::Color | libfreenect2::Frame::Depth);
    // color camera
    device->setColorFrameListener(&listener);
    // IR camera (IR and depth share one sensor)
    device->setIrAndDepthFrameListener(&listener);
    // Start the device
    device->start();

    // Registration tool: aligns (undistorts) rgb and depth
    libfreenect2::Registration *registration = new libfreenect2::Registration(
            device->getIrCameraParams(),    // factory intrinsics stored on the camera; your own calibration is usually more accurate
            device->getColorCameraParams());
    std::cout << "Device firmware version: " << device->getFirmwareVersion() << std::endl;

    libfreenect2::Frame undistorted(512, 424, 4),   // receives the undistorted depth image (4 = bytes per float pixel)
            registered(512, 424, 4),                // receives the color image registered onto the depth frame (may contain holes)
            bigdepth(1920, 1082, 4);                // receives the depth mapped onto the color frame: 1920x1082 float;
                                                    // the extra first and last rows are padding and must be cropped away

    // Container that receives the incoming frames
    libfreenect2::FrameMap frames;
    int index = 0;
    while (true) {
        // Press space to save the images, ESC to quit.
        // waitForNewFrame blocks until new data arrive; the previous frames must be
        // released first (listener.release(frames)) or the wait never returns.
        listener.waitForNewFrame(frames);
        // color frame
        libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
        // depth frame
        libfreenect2::Frame *depth = frames[libfreenect2::Frame::Depth];
//        std::cout << "rgb->width: " << rgb->width << " rgb->height: " << rgb->height << std::endl;
//        std::cout << "depth->width: " << depth->width << " depth->height: " << depth->height << std::endl;

        // Register color and depth: rgb = raw color, undistorted = undistorted depth,
        // registered = color mapped onto the depth frame, true = filter out pixels the color camera cannot see,
        // bigdepth = depth mapped onto the color frame (1920x1082 float, padding rows included)
        registration->apply(rgb, depth, &undistorted, &registered, true, &bigdepth);

        // 512x424 color image aligned to the depth frame
//        Mat registeredRgbMat = Mat(registered.height, registered.width, CV_8UC4, registered.data);
        // 512x424 undistorted depth image
//        Mat undistortedDepthMat = Mat(undistorted.height, undistorted.width, CV_32FC1, undistorted.data);
        // 512x424 raw (not undistorted) depth image
//        Mat smallDepthMat = Mat(depth->height, depth->width, CV_32FC1, depth->data);

        // Wrap the libfreenect2 buffers in OpenCV Mats.
        // CV_8UC4: unsigned 8-bit, 4 channels. 1920x1080 color image.
        Mat rgbMat = Mat(rgb->height, rgb->width, CV_8UC4, rgb->data);
        // 1920x1082, CV_32FC1 (32-bit float, single channel); depth values in millimetres
        Mat bigDepthMat_ = Mat(bigdepth.height, bigdepth.width, CV_32FC1, bigdepth.data);

        // Crop away the padding rows: Rect(x, y, width, height), 1920x1082 -> 1920x1080
        Mat bigDepthMat = bigDepthMat_(Rect(0, 1, bigDepthMat_.cols, bigDepthMat_.rows - 2));

        // Filter the depth values: drop infinite / NaN values
        for (int i = 0; i < bigDepthMat.rows; ++i) {
            for (int j = 0; j < bigDepthMat.cols; ++j) {
                float &d = bigDepthMat.at<float>(i, j);
                if (fpclassify(d) == FP_INFINITE || fpclassify(d) == FP_NAN) {
                    d = 0;
                }
            }
        }

        // Flip horizontally
        flip(rgbMat, rgbMat, 1);
        flip(bigDepthMat, bigDepthMat, 1);

        int action = waitKey(30) & 0xFF;
        if (action == ACTION_ESC || action == 'q' || action == 'Q') {
            // 'q' or ESC (27) quits
            break;
        } else if (action == ACTION_SPACE) {
            // Space (32) saves the images
            std::cout << "Saving the RGB and depth images" << std::endl;

            // Save the color image
            stringstream rr;
            rr << output_dir << "/rgb_image_" << index << ".jpg";
            vector<int> params;
            params.push_back(CV_IMWRITE_JPEG_QUALITY);  // image format
            params.push_back(100);                      // compression quality
            bool rst = imwrite(rr.str(), rgbMat, params);
            if (!rst) {
                std::cerr << "Output directory does not exist, please check before saving: " << output_dir << std::endl;
                return -1;
            }
            std::cout << "Saved the color image: " << rr.str() << std::endl;

            // Save the depth image. The depth data are raw numbers, not a viewable picture,
            // so store them in a file. Convert to single-channel 16-bit unsigned (CV_16UC1).
            bigDepthMat.convertTo(bigDepthMat, CV_16UC1);
            stringstream dd;
            dd << output_dir << "/depth_image_" << index << ".xml";
            FileStorage fs(dd.str(), FileStorage::WRITE);
            fs << "depth" << bigDepthMat;
            fs.release();
            std::cout << "Saved the depth values: " << dd.str() << std::endl;
            index++;    // advance the file index so the next save does not overwrite
        }

        // Scale to 0.5x for display
        resize(rgbMat, rgbMat, Size(), 0.5, 0.5, INTER_LINEAR);
        resize(bigDepthMat, bigDepthMat, Size(), 0.5, 0.5, INTER_LINEAR);
        imshow("color", rgbMat);
        imshow("depth", bigDepthMat / 4500.0f);   // dividing by 4500 is the recommended way to display the depth map

        // The current frames must be released before waitForNewFrame will deliver new data
        listener.release(frames);
    }

    device->stop();      // stop the device
    device->close();     // then close it
    delete registration; // free the registration helper
}

02-PointCloudMaker.cpp: convert Kinect2 images to a point cloud

//
// Created by ty on 20-6-3.
//

#include <iostream>
#include <cmath>
#include <opencv2/opencv.hpp>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/io.h>
#include <pcl/visualization/cloud_viewer.h>

using namespace std;
using namespace cv;

typedef pcl::PointXYZRGB PointT;
typedef pcl::PointCloud<PointT> PointCloud;

/**
 * OpenCV + PCL
 *
 * Input:
 *      color image
 *      depth image
 *      camera intrinsics
 *
 * Output:
 *      point cloud
 */
int main(int argc, char *argv[]) {
    // Load the color image
    const Mat &rgbMat = imread("./output1/rgb_image_0.jpg", CV_LOAD_IMAGE_UNCHANGED);

    // Load the depth image
    Mat depthMat;
    FileStorage fs("./output1/depth_image_0.xml", FileStorage::READ);
    fs["depth"] >> depthMat;
    fs.release();

    // Load the camera intrinsics
    Mat cameraMatrix = cv::Mat_<double>(3, 3);
    FileStorage camera_fs("./calib/calibration_in_params.yml", FileStorage::READ);
    camera_fs["cameraMatrix"] >> cameraMatrix;
    camera_fs.release();
    std::cout << cameraMatrix << std::endl;

    double fx = cameraMatrix.at<double>(0, 0);
    double fy = cameraMatrix.at<double>(1, 1);
    double cx = cameraMatrix.at<double>(0, 2);  // principal point, u
    double cy = cameraMatrix.at<double>(1, 2);  // principal point, v

    // Point cloud that receives the data
    PointCloud::Ptr cloud(new PointCloud);

    // Walk over the depth image and give each cloud point its color and depth
    for (int v = 0; v < depthMat.rows; ++v) {       // all rows
        for (int u = 0; u < depthMat.cols; ++u) {   // all columns
            // Depth (Z) of this pixel, in millimetres
            ushort d = depthMat.at<ushort>(v, u);
            // Convert to metres: computations use metres, storage uses millimetres
            double depth_value = double(d) / 1000.0f;
            if (depth_value == 0 || isnan(depth_value) || abs(depth_value) < 0.0001) {
                // skip zero, NaN, or extremely small (well below a millimetre) depths
                continue;
            }

            // Create a point
            PointT p;
            // Back-project with the intrinsics to get X and Y -----------------------------
            p.z = depth_value;
            p.x = (u - cx) * p.z / fx;
            p.y = (v - cy) * p.z / fy;

            // Assign its color
            Vec3b bgr = rgbMat.at<Vec3b>(v, u);
            p.b = bgr[0];
            p.g = bgr[1];
            p.r = bgr[2];

            // Add it to the cloud
            cloud->points.push_back(p);
        }
    }

    // Show the cloud
    cloud->width = cloud->points.size(); // for an unorganized cloud the width is simply the number of points
    cloud->height = 1;
    cloud->is_dense = false;             // not dense: invalid points were skipped
    pcl::visualization::CloudViewer viewer("cloud_viewer");
    viewer.showCloud(cloud);

    // Save the cloud to a PCD file
//    pcl::io::savePCDFileASCII("output/test_pcd.pcd", *cloud);
//    std::cerr << "Saved " << cloud->points.size() << " data points to test_pcd.pcd." << std::endl;

    while (!viewer.wasStopped()) {
    }
}
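The loop above is the standard pinhole back-projection: a pixel (u, v) with depth z (in metres) maps to camera coordinates x = (u - cx) * z / fx and y = (v - cy) * z / fy. A minimal standalone sketch of that conversion for a single pixel follows; the intrinsics here are made-up placeholder values, the real ones come from calibration_in_params.yml.

// Minimal sketch of the pinhole back-projection used in 02-PointCloudMaker.cpp.
// The intrinsics below are hypothetical placeholders, not calibrated values.
#include <iostream>

int main() {
    const double fx = 1081.37, fy = 1081.37, cx = 959.5, cy = 539.5; // placeholder intrinsics
    const int u = 1200, v = 600;   // pixel coordinates
    const double z = 1.250;        // depth in metres (stored as millimetres, divided by 1000)

    const double x = (u - cx) * z / fx;
    const double y = (v - cy) * z / fy;
    std::cout << "pixel (" << u << ", " << v << ") -> point ("
              << x << ", " << y << ", " << z << ")" << std::endl;
    return 0;
}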

03-PhotoCapture.cpp: capture point clouds from the Kinect2 in real time

#include <iostream>
#include <opencv2/opencv.hpp>
#include <sstream>
#include <cstdlib>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/cloud_viewer.h>
#include <KinectCamera.h>

using namespace std;
using namespace cv;

float qnan_ = std::numeric_limits<float>::quiet_NaN();

const char *cameraInCailFile = "./calib/calibration_in_params.yml";
// Scene capture directory
//const char *output_dir = "./scene";
// Template capture directory
const char *output_dir = "./capture_temp";

const int ACTION_ESC = 27;
const int ACTION_SPACE = 32;

Eigen::Matrix<float, 1920, 1> colmap;
Eigen::Matrix<float, 1080, 1> rowmap;
const short w = 1920, h = 1080;

// Precompute the per-column and per-row back-projection factors
void prepareMake3D(const double cx, const double cy,
                   const double fx, const double fy) {
//    const int w = 512;
//    const int h = 424;
    float *pm1 = colmap.data();
    float *pm2 = rowmap.data();
    for (int i = 0; i < w; i++) {
        *pm1++ = (i - cx + 0.5) / fx;
    }
    for (int i = 0; i < h; i++) {
        *pm2++ = (i - cy + 0.5) / fy;
    }
}

/**
 * Merge the RGB color image and the depth image into a point cloud using the intrinsics
 * @param cloud
 * @param depthMat
 * @param rgbMat
 */
void getCloud(pcl::PointCloud<pcl::PointXYZRGB>::Ptr &cloud, Mat &depthMat, Mat &rgbMat) {
    const float *itD0 = (float *) depthMat.ptr();
    const char *itRGB0 = (char *) rgbMat.ptr();

    if (cloud->size() != w * h)
        cloud->resize(w * h);

    // Pointer to the cloud's point array
    pcl::PointXYZRGB *itP = &cloud->points[0];
    bool is_dense = true;

    // Walk over all rows
    for (size_t y = 0; y < h; ++y) {
        // Offset: skip y rows, i.e. y * 1920 pixels
        const unsigned int offset = y * w;
        // Depth pointer for this row
        const float *itD = itD0 + offset;
        // Color pointer for this row (4 bytes per pixel)
        const char *itRGB = itRGB0 + offset * 4;
        // Back-projection factor for this row
        const float dy = rowmap(y);

        // Walk over every column of row y
        for (size_t x = 0; x < w; ++x, ++itP, ++itD, itRGB += 4) {
            const float depth_value = *itD / 1000.0f;

            if (!isnan(depth_value) && abs(depth_value) >= 0.0001) {
                const float rx = colmap(x) * depth_value;
                const float ry = dy * depth_value;
                itP->z = depth_value;
                itP->x = rx;
                itP->y = ry;
                itP->b = itRGB[0];
                itP->g = itRGB[1];
                itP->r = itRGB[2];
            } else {
                itP->z = qnan_;
                itP->x = qnan_;
                itP->y = qnan_;
                itP->b = qnan_;
                itP->g = qnan_;
                itP->r = qnan_;
                is_dense = false;
            }
        }
    }
    cloud->is_dense = is_dense;
}

/**
 * Save live images from the Kinect camera.
 * Press space to save.
 */
int main(int argc, char *argv[]) {
    // Show the RGB image, the depth image, and the merged point cloud
    auto *camera = new KinectCamera();
//    cv::VideoCapture capture(0);
    if (!camera->isOpened()) {
        std::cout << "Cannot open the camera!" << std::endl;
        return -1;
    }

    Mat cameraMatrix = cv::Mat_<double>(3, 3);
    FileStorage paramFs(cameraInCailFile, FileStorage::READ);
    paramFs["cameraMatrix"] >> cameraMatrix;

    // Intrinsics
    double fx = cameraMatrix.at<double>(0, 0);
    double fy = cameraMatrix.at<double>(1, 1);
    double cx = cameraMatrix.at<double>(0, 2);
    double cy = cameraMatrix.at<double>(1, 2);
    prepareMake3D(cx, cy, fx, fy);

    // Point cloud viewer
    pcl::visualization::PCLVisualizer::Ptr viewer(new pcl::visualization::PCLVisualizer("3D Viewer"));
    // Cloud held by a smart pointer
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>());
    viewer->setBackgroundColor(0, 0, 0);
    pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> rgbHandler(cloud);
    viewer->addPointCloud<pcl::PointXYZRGB>(cloud, rgbHandler, "sample cloud");
    viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud");

    int index = 0;
    while (true) {
        cv::Mat rgbMat, depthMat;
        camera->capture(rgbMat, depthMat);

        if (!viewer->wasStopped()) {
            // Merge RGB and depth into a point cloud, stored in `cloud`
            getCloud(cloud, depthMat, rgbMat);
            pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> rgbHandler(cloud);
            viewer->updatePointCloud<pcl::PointXYZRGB>(cloud, rgbHandler, "sample cloud");
            viewer->spinOnce();
        }

        int action = cv::waitKey(20) & 0xFF;
        if (action == ACTION_SPACE || action == 'q') {
            // Save the current point cloud
            std::cout << "Saving the current point cloud" << std::endl;

            stringstream rr;
            rr << output_dir << "/camera_rgb_" << index << ".jpg";
            vector<int> compression_params;
            compression_params.push_back(CV_IMWRITE_JPEG_QUALITY); // JPEG
            compression_params.push_back(100);                     // image quality
            bool rst = cv::imwrite(rr.str(), rgbMat, compression_params);
            if (!rst) {
                std::cerr << "Save failed, please check that this directory exists: " << output_dir << std::endl;
                return -1;
            }
            cout << rr.str() << endl;

            // Save the depth image
            depthMat.convertTo(depthMat, CV_16UC1);
            stringstream ss;
            ss << output_dir << "/camera_depth_" << index << ".xml";
            FileStorage fs(ss.str(), FileStorage::WRITE);
            fs << "depth" << depthMat;
            fs.release();

            // Save the template point cloud
            stringstream ss_pcd;
            ss_pcd << output_dir << "/table_scene_" << index << ".pcd";
            pcl::io::savePCDFile(ss_pcd.str(), *cloud);

            index++;
            cout << "Saved the RGB image, depth image, and point cloud" << endl;
        } else if (action == ACTION_ESC) {
            // Quit
            break;
        }

        cv::imshow("rgb", rgbMat);
        cv::imshow("depth", depthMat / 4500);
    }

    camera->release();
    return 0;
}

===========================================================================================

Kinect2: a wrapped driver class

As written, the wrapper only grabs the color image; a sketch of a depth-capable capture overload is given after the KinectCamera.cpp listing below.

workspas/HandEyeCalibration/cmake/Findfreenect2.cmake

# Find the freenect2 dependency
# include path: freenect2_INCLUDE_DIRS
# libraries:    freenect2_LIBRARIES
FIND_LIBRARY(freenect2_LIBRARY freenect2
        PATHS ~/freenect2/lib
        NO_DEFAULT_PATH)
SET(freenect2_LIBRARIES ${freenect2_LIBRARY} pthread)

# Find the include path
FIND_PATH(freenect2_INCLUDE_DIR libfreenect2/libfreenect2.hpp
        PATHS ~/freenect2/include
        NO_DEFAULT_PATH)
SET(freenect2_INCLUDE_DIRS ${freenect2_INCLUDE_DIR})

workspas/HandEyeCalibration/src/camera/CMakeLists.txt

add_library(kinect_camera KinectCamera.cpp KinectCamera.h)
target_link_libraries(kinect_camera ${OpenCV_LIBRARIES} ${freenect2_LIBRARIES})

workspas/HandEyeCalibration/src/camera/KinectCamera.cpp

//
// Created by ty on 20-9-26.
//

#include "KinectCamera.h"

int KinectCamera::init() {
    // Check how many devices are connected
    if (freenect2.enumerateDevices() == 0) {
        std::cerr << "No Kinect2 device found" << std::endl;
        return -1;
    }
    const string &serial = freenect2.getDefaultDeviceSerialNumber();
    if (serial.empty()) {
        std::cerr << "Device serial number not available" << std::endl;
        return -1;
    }
    std::cout << "Device serial number: " << serial << std::endl;

    // Create the pipeline that receives and processes the packets
//    auto *pipeline = new libfreenect2::CpuPacketPipeline();
    auto pipeline = new libfreenect2::OpenGLPacketPipeline();

    // Open the device by serial number with this processing pipeline
    device = freenect2.openDevice(serial, pipeline);

    // Attach the frame listener to the device
    device->setColorFrameListener(&listener);
    device->setIrAndDepthFrameListener(&listener);
    device->start();
    return 0;
}

void KinectCamera::capture(Mat &outputMat) {
    listener.waitForNewFrame(frames);
    libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
    libfreenect2::Frame *depth = frames[libfreenect2::Frame::Depth];

    Mat colorMat = Mat(rgb->height, rgb->width, CV_8UC4, rgb->data);
    // Mirror the image
    flip(colorMat, colorMat, 1);
    colorMat.copyTo(outputMat);

    listener.release(frames);
}

void KinectCamera::release() {
    device->stop();
    device->close();
}

KinectCamera::~KinectCamera() {}
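03-PhotoCapture.cpp calls the two-argument camera->capture(rgbMat, depthMat), which KinectCamera.h below declares but the listing above never defines. A possible sketch of that overload is shown here, following the registration/bigdepth pattern from 01-Kinect2Camera.cpp; it assumes an extra libfreenect2::Registration *registration member created in init() from device->getIrCameraParams() and device->getColorCameraParams(), plus #include <libfreenect2/registration.h>, neither of which appears in the listings above.

// Sketch only: `registration` is an assumed extra member (see the note above),
// created in init() the same way as in 01-Kinect2Camera.cpp.
void KinectCamera::capture(Mat &colorMat, Mat &depthMat) {
    listener.waitForNewFrame(frames);
    libfreenect2::Frame *rgb = frames[libfreenect2::Frame::Color];
    libfreenect2::Frame *depth = frames[libfreenect2::Frame::Depth];

    // Map the 512x424 depth frame onto the 1920x1080 color frame
    libfreenect2::Frame undistorted(512, 424, 4), registered(512, 424, 4), bigdepth(1920, 1082, 4);
    registration->apply(rgb, depth, &undistorted, &registered, true, &bigdepth);

    Mat rgbFrame(rgb->height, rgb->width, CV_8UC4, rgb->data);
    Mat bigDepth(bigdepth.height, bigdepth.width, CV_32FC1, bigdepth.data);

    // Drop the two padding rows (1082 -> 1080) and mirror both images; flip()
    // writes into freshly allocated Mats, so the data remain valid after release()
    Mat depthCropped = bigDepth(Rect(0, 1, bigDepth.cols, bigDepth.rows - 2));
    flip(rgbFrame, colorMat, 1);
    flip(depthCropped, depthMat, 1);

    listener.release(frames);
}

This would hand getCloud() in 03-PhotoCapture.cpp what it expects: a 1920x1080 CV_32FC1 depth image in millimetres alongside the CV_8UC4 color image.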

workspas/HandEyeCalibration/src/camera/KinectCamera.h

//
// Created by ty on 20-9-26.
//

#ifndef HANDEYECALIBRATION_KINECTCAMERA_H
#define HANDEYECALIBRATION_KINECTCAMERA_H

#include <iostream>
#include <opencv2/opencv.hpp>
#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/packet_pipeline.h>
#include <libfreenect2/frame_listener_impl.h>

using namespace std;
using namespace cv;

class KinectCamera {
private:
    int status = -2; // -2 initial state, -1 error, 0 ready
    // 1. Kinect2 camera objects
    libfreenect2::Freenect2 freenect2;
    libfreenect2::Freenect2Device *device = nullptr;
    libfreenect2::SyncMultiFrameListener listener;
    libfreenect2::FrameMap frames;

public:
    KinectCamera() : listener(libfreenect2::Frame::Color | libfreenect2::Frame::Depth) {
        status = init();
    }

    virtual ~KinectCamera();

    // Is the device open and ready?
    bool isOpened() {
        return status == 0;
    }

    virtual int init();

    virtual void release();

    void capture(Mat &colorMat);

    void capture(Mat &colorMat, Mat &depthMat);
};

#endif //HANDEYECALIBRATION_KINECTCAMERA_H

Usage:

#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/packet_pipeline.h>
#include <libfreenect2/frame_listener_impl.h>
#include <opencv2/opencv.hpp>
#include <KinectCamera.h>

using namespace cv;

int main(int argc, char *argv[]) {
    KinectCamera *camera = new KinectCamera();
    if (!camera->isOpened()) {
        std::cerr << "Failed to open the camera" << std::endl;
        return -1;
    }

    cv::Size imageSize;
    while (true) {
        // 2. Receive the camera's rgb/depth data in a loop (the frames must be released
        //    before the next wait; capture() does this internally via listener.release(frames))
        Mat colorMat, srcMat, gray;
        camera->capture(colorMat);

        // Keep a copy of the original image
        colorMat.copyTo(srcMat);

        // 3. Look for the calibration corners in the RGB color image
        cvtColor(colorMat, gray, COLOR_BGR2GRAY);
        imageSize = gray.size();

        imshow("color", colorMat);
        if ((waitKey(30) & 0xFF) == 27) {  // ESC quits
            break;
        }
    }
    cv::destroyAllWindows();
    // Close the device when done
    camera->release();
}

------------------------------------------------------------------------------------------------------------------------------------

Starting the camera from the console

cd ~/3rdparty/source/libfreenect2-master/build/bin
./Protonect
