AprilTag实时识别与定位

在看完前面几篇介绍VISP的安装,编译,配置,AprilTag码的生成,识别等,见相关系列:

AprilTag专栏

下面是在实时显示视频流的同时识别AprilTag的C++代码。

#include <visp3/core/vpConfig.h>
#ifdef VISP_HAVE_MODULE_SENSOR
#include <visp3/sensor/vpV4l2Grabber.h>
#include <visp3/sensor/vp1394CMUGrabber.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/sensor/vpFlyCaptureGrabber.h>
#include <visp3/sensor/vpRealSense2.h>
#endif
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/core/vpXmlParserCamera.h>//#undef VISP_HAVE_V4L2
//#undef VISP_HAVE_DC1394
//#undef VISP_HAVE_CMU1394
//#undef VISP_HAVE_FLYCAPTURE
//#undef VISP_HAVE_REALSENSE2
//#undef VISP_HAVE_OPENCV

/*!
 * Live AprilTag detection and pose estimation.
 *
 * Grabs images from the first available frame-grabber backend compiled into
 * ViSP (V4L2, DC1394, CMU1394, FlyCapture, RealSense2 or OpenCV), detects
 * AprilTags in each frame, estimates the tag pose cMo and overlays the tag
 * frame on the live display. Click in the display window to quit.
 */
int main(int argc, const char **argv)
{
#if defined(VISP_HAVE_APRILTAG) && \
    (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
     (VISP_HAVE_OPENCV_VERSION >= 0x020100) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2))
  int opt_device = 0;             // For OpenCV and V4l2 grabber to set the camera device
  vpDetectorAprilTag::vpAprilTagFamily tagFamily = vpDetectorAprilTag::TAG_36h11;
  vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::HOMOGRAPHY_VIRTUAL_VS;
  double tagSize = 0.053;          // Tag side length in meters, needed for pose estimation
  float quad_decimate = 1.0;       // Image decimation factor used by the quad detector
  int nThreads = 1;
  std::string intrinsic_file = ""; // Optional XML file holding the camera intrinsics
  std::string camera_name = "";    // Camera name to look up in the XML file
  bool display_tag = false;
  int color_id = -1;               // -1 keeps the detector's default drawing color
  unsigned int thickness = 2;
  bool align_frame = false;        // When true, align the tag frame z-axis with the camera axis

#if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
  bool display_off = true;
  std::cout << "Warning: There is no 3rd party (X11, GDI or openCV) to display images..." << std::endl;
#else
  bool display_off = false;
#endif

  vpImage<unsigned char> I;

  // Command-line parsing; each value option consumes the following argument.
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--pose_method" && i + 1 < argc) {
      poseEstimationMethod = (vpDetectorAprilTag::vpPoseEstimationMethod)atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
      tagSize = atof(argv[i + 1]);
    } else if (std::string(argv[i]) == "--camera_device" && i + 1 < argc) {
      opt_device = atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
      quad_decimate = (float)atof(argv[i + 1]);
    } else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
      nThreads = atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
      intrinsic_file = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
      camera_name = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--display_tag") {
      display_tag = true;
    } else if (std::string(argv[i]) == "--display_off") {
      display_off = true;
    } else if (std::string(argv[i]) == "--color" && i + 1 < argc) {
      color_id = atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--thickness" && i + 1 < argc) {
      thickness = (unsigned int)atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
      tagFamily = (vpDetectorAprilTag::vpAprilTagFamily)atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--z_aligned") {
      align_frame = true;
    } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: " << argv[0]
                << " [--camera_device <camera device> (default: 0)]"
                << " [--tag_size <tag_size in m> (default: 0.053)]"
                   " [--quad_decimate <quad_decimate> (default: 1)]"
                   " [--nthreads <nb> (default: 1)]"
                   " [--intrinsic <intrinsic file> (default: empty)]"
                   " [--camera_name <camera name>  (default: empty)]"
                   " [--pose_method <method> (0: HOMOGRAPHY, 1: HOMOGRAPHY_VIRTUAL_VS, "
                   " 2: DEMENTHON_VIRTUAL_VS, 3: LAGRANGE_VIRTUAL_VS, "
                   " 4: BEST_RESIDUAL_VIRTUAL_VS, 5: HOMOGRAPHY_ORTHOGONAL_ITERATION) (default: 0)]"
                   " [--tag_family <family> (0: TAG_36h11, 1: TAG_36h10 (DEPRECATED), 2: TAG_36ARTOOLKIT (DEPRECATED),"
                   " 3: TAG_25h9, 4: TAG_25h7 (DEPRECATED), 5: TAG_16h5, 6: TAG_CIRCLE21h7, 7: TAG_CIRCLE49h12,"
                   " 8: TAG_CUSTOM48h12, 9: TAG_STANDARD41h12, 10: TAG_STANDARD52h13) (default: 0)]"
                   " [--display_tag] [--z_aligned]";
#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
      std::cout << " [--display_off] [--color <color id>] [--thickness <line thickness>]";
#endif
      std::cout << " [--help]" << std::endl;
      return EXIT_SUCCESS;
    }
  }

  try {
    // Default intrinsics; overridden by the XML calibration file when given,
    // or by the parameters read from a RealSense device below.
    vpCameraParameters cam;
    cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779);
    vpXmlParserCamera parser;
    if (!intrinsic_file.empty() && !camera_name.empty())
      parser.parse(cam, intrinsic_file, camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);

    // Select the frame grabber at compile time, first available backend wins.
#if defined(VISP_HAVE_V4L2)
    vpV4l2Grabber g;
    std::ostringstream device;
    device << "/dev/video" << opt_device;
    std::cout << "Use Video 4 Linux grabber on device " << device.str() << std::endl;
    g.setDevice(device.str());
    g.setScale(1);
    g.open(I);
#elif defined(VISP_HAVE_DC1394)
    (void)opt_device; // To avoid non used warning
    std::cout << "Use DC1394 grabber" << std::endl;
    vp1394TwoGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_CMU1394)
    (void)opt_device; // To avoid non used warning
    std::cout << "Use CMU1394 grabber" << std::endl;
    vp1394CMUGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_FLYCAPTURE)
    (void)opt_device; // To avoid non used warning
    std::cout << "Use FlyCapture grabber" << std::endl;
    vpFlyCaptureGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_REALSENSE2)
    (void)opt_device; // To avoid non used warning
    std::cout << "Use Realsense 2 grabber" << std::endl;
    vpRealSense2 g;
    rs2::config config;
    config.disable_stream(RS2_STREAM_DEPTH);
    config.disable_stream(RS2_STREAM_INFRARED);
    config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
    g.open(config);
    g.acquire(I);

    std::cout << "Read camera parameters from Realsense device" << std::endl;
    cam = g.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
#elif defined(VISP_HAVE_OPENCV)
    std::cout << "Use OpenCV grabber on device " << opt_device << std::endl;
    cv::VideoCapture g(opt_device); // Open the default camera
    if (!g.isOpened()) {            // Check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    g >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I); // OpenCV grabs to cv::Mat, convert to vpImage
#endif

    std::cout << cam << std::endl;
    std::cout << "poseEstimationMethod: " << poseEstimationMethod << std::endl;
    std::cout << "tagFamily: " << tagFamily << std::endl;
    std::cout << "nThreads : " << nThreads << std::endl;
    std::cout << "Z aligned: " << align_frame << std::endl;

    // Create a display attached to I using the first available GUI backend.
    vpDisplay *d = NULL;
    if (!display_off) {
#ifdef VISP_HAVE_X11
      d = new vpDisplayX(I);
#elif defined(VISP_HAVE_GDI)
      d = new vpDisplayGDI(I);
#elif defined(VISP_HAVE_OPENCV)
      d = new vpDisplayOpenCV(I);
#endif
    }

    vpDetectorAprilTag detector(tagFamily);
    detector.setAprilTagQuadDecimate(quad_decimate);
    detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
    detector.setAprilTagNbThreads(nThreads);
    detector.setDisplayTag(display_tag, color_id < 0 ? vpColor::none : vpColor::getColor(color_id), thickness);
    detector.setZAlignedWithCameraAxis(align_frame);

    std::vector<double> time_vec; // Per-frame detection time, for the final statistics
    for (;;) {
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
      g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif

      vpDisplay::display(I);

      double t = vpTime::measureTimeMs();
      std::vector<vpHomogeneousMatrix> cMo_vec; // One camera-to-tag pose per detected tag
      detector.detect(I, tagSize, cam, cMo_vec);
      t = vpTime::measureTimeMs() - t;
      time_vec.push_back(t);

      std::stringstream ss;
      ss << "Detection time: " << t << " ms for " << detector.getNbObjects() << " tags";
      vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);

      // Overlay the estimated tag frame for every detection.
      for (size_t i = 0; i < cMo_vec.size(); i++) {
        vpDisplay::displayFrame(I, cMo_vec[i], cam, tagSize / 2, vpColor::none, 3);
      }

      vpDisplay::displayText(I, 20, 20, "Click to quit.", vpColor::red);
      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false))
        break;
    }

    std::cout << "Benchmark computation time" << std::endl;
    std::cout << "Mean / Median / Std: " << vpMath::getMean(time_vec) << " ms"
              << " ; " << vpMath::getMedian(time_vec) << " ms"
              << " ; " << vpMath::getStdev(time_vec) << " ms" << std::endl;

    if (!display_off)
      delete d;
  } catch (const vpException &e) {
    std::cerr << "Catch an exception: " << e.getMessage() << std::endl;
  }

  return EXIT_SUCCESS;
#else
  (void)argc;
  (void)argv;
#ifndef VISP_HAVE_APRILTAG
  std::cout << "Enable Apriltag support, configure and build ViSP to run this tutorial" << std::endl;
#else
  std::cout << "Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, Realsense2), configure and build ViSP again to use this example" << std::endl;
#endif
#endif
  return EXIT_SUCCESS;
}

这个例子和上面的类似

The usage of this example is similar to the previous one:

  1. with option --tag_family you select the kind of tag that you want
    to detect.
  2. if more than one camera is connected to your computer, with option
    --camera_device you can select which camera to use. The first camera
    that is found has number 0.

To detect 36h11 tags on images acquired by a second camera connected to your computer use:

$ ./tutorial-apriltag-detector-live --tag_family 0 --camera_device 1

The source code of this example is very similar to the previous one except that here we use camera framegrabber devices (see Tutorial: Image frame grabbing).

这里用实时显示

Two different grabbers may be used:
两种方法

If ViSP was built with Video For Linux (V4L2) support available for example on Fedora or Ubuntu distribution, VISP_HAVE_V4L2 macro is defined. In that case, images coming from an USB camera are acquired using vpV4l2Grabber class.
如果是Linux系统,就用vpV4l2Grabber这个类采集图像

If ViSP wasn’t built with V4L2 support but with OpenCV, we use cv::VideoCapture class to grab the images. Notice that when images are acquired with OpenCV there is an additional conversion from cv::Mat to vpImage.
如果是用OpenCV采集图像,那就还要做一个转换

// Excerpt: compile-time selection of the frame grabber (first available backend wins).
#if defined(VISP_HAVE_V4L2)
vpV4l2Grabber g;
std::ostringstream device;
device << "/dev/video" << opt_device;
std::cout << "Use Video 4 Linux grabber on device " << device.str() << std::endl;
g.setDevice(device.str());
g.setScale(1);
g.open(I);
#elif defined(VISP_HAVE_DC1394)
(void)opt_device; // To avoid non used warning
std::cout << "Use DC1394 grabber" << std::endl;
vp1394TwoGrabber g;
g.open(I);
#elif defined(VISP_HAVE_CMU1394)
(void)opt_device; // To avoid non used warning
std::cout << "Use CMU1394 grabber" << std::endl;
vp1394CMUGrabber g;
g.open(I);
#elif defined(VISP_HAVE_FLYCAPTURE)
(void)opt_device; // To avoid non used warning
std::cout << "Use FlyCapture grabber" << std::endl;
vpFlyCaptureGrabber g;
g.open(I);
#elif defined(VISP_HAVE_REALSENSE2)
(void)opt_device; // To avoid non used warning
std::cout << "Use Realsense 2 grabber" << std::endl;
vpRealSense2 g;
rs2::config config;
config.disable_stream(RS2_STREAM_DEPTH);
config.disable_stream(RS2_STREAM_INFRARED);
config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
g.open(config);
g.acquire(I);

std::cout << "Read camera parameters from Realsense device" << std::endl;
cam = g.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
#elif defined(VISP_HAVE_OPENCV)
std::cout << "Use OpenCV grabber on device " << opt_device << std::endl;
cv::VideoCapture g(opt_device); // Open the default camera
if (!g.isOpened()) {            // Check if we succeeded
  std::cout << "Failed to open the camera" << std::endl;
  return -1;
}
cv::Mat frame;
g >> frame; // get a new frame from camera
vpImageConvert::convert(frame, I); // OpenCV grabs to cv::Mat, convert to vpImage
#endif

Then in the while loop, at each iteration we acquire a new image
在循环的时候,采集一副新图象

// Excerpt: per-iteration image acquisition inside the main loop.
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
g >> frame;                        // OpenCV grabs to cv::Mat...
vpImageConvert::convert(frame, I); // ...then convert to vpImage
#endif

This new image is then given as input to the AprilTag detector.
采集到的新图像随后作为输入交给AprilTag检测器进行识别。

ViSP中AprilTag的实时识别与定位相关推荐

  1. AprilTag的Tag识别,定位以及跟随

    AprilTag的Tag识别,定位以及跟随 Author lifuguan E-mail 1002732355@qq.com 开源链接 https://github.com/lifuguan/Apri ...

  2. 使用(SIFT特征KMeans聚类关键点训练SVM)实现自然图像中的logo商标识别和定位

    (本博客只记录方法,因为本人觉得这是机器学习特征工程中一种比较不错的做法) 上一篇博客中的方法:使用Py-OpenCV(SIFT关键点)实现自然图像中的logo商标识别和定位 当然也能提前欲知该方法的 ...

  3. 基于yolov5与改进VGGNet的车辆多标签实时识别算法

    摘    要 为了能快速.有效地识别视频中的车辆信息,文中结合YOLOv3算法和CNN算法的优点,设计了一种能实时识别车辆多标签信息的算法.首先,利用具有较高识别速度和准确率的YOLOv3实现对视频流 ...

  4. ViSP中识别AprilTag的C++实例代码与运行结果

    VISP中识别AprilTag的C++可运行代码与运行结果 Introduction ***具体解释见下一篇:***VISP中识别AprilTag的C++实例代码解释 ***具体帮助开发文档下载:** ...

  5. ViSP中识别AprilTag的C++实例代码解释

    VISP中识别AprilTag的C++实例代码解释 接着上一篇: VISP中识别AprilTag的C++实例代码与运行结果 先展示代码,一句一句解释吧 #include <visp3/detec ...

  6. 在H5中使用腾讯地图,实现定位,距离计算,实时搜索,地址逆解析

    在H5中使用腾讯地图,实现定位,距离计算,实时搜索,地址逆解析 1.创建应用 2.下载微信sdk包 3.安装`vue-jsonp` 4.使用 获取当前位置信息,逆解析地址 路线规划,距离计算 实时搜索 ...

  7. You Only Watch Once:实时人体动作定位网络

    点击我爱计算机视觉标星,更快获取CVML新技术 今天跟大家介绍一篇YOLO风格浓郁的论文,来自慕尼黑工业大学的学者受人类视觉的启发,提出一种快速实时的视频动作定位方法You Only Watch On ...

  8. ABCNet 精读:使用自适应贝塞尔曲线网络进行进行实时场景文本定位 OCR 文本定位 文本检测 CVPR

    文章目录 ABCNet 导读:使用自适应贝塞尔曲线网络进行进行实时场景文本定位 ABCNet:Real-time Scene Text Spotting with Adaptive Bezier-Cu ...

  9. 【智慧消防云平台】感知、识别、定位,实时动态采集消防信息

    智慧消防云平台 安科瑞 崔远航 ​智慧消防云平台基于物联网.大数据.云计算等现代信息技术,将分散的火灾自动报警设备.电气火灾监控设备.智慧烟感探测器.智慧消防用水等设备连接形成网络,并对这些设备的状态 ...

最新文章

  1. IOS15仿地铁我的页面
  2. idea无法导入主题jar包_总结IDEA开发的26个常用设置
  3. 突发!美国财政部、商务部双双出手制裁!大疆、旷视等8家被列入投资黑名单!34家被拉入实体清单,GPU龙头景嘉微在列!...
  4. 磁盘管理之逻辑卷管理(Logical Volume Manager)
  5. php case语句吗,【PHP公开课|关于PHP switch case语句,你知道怎么用吗,这个例子你一定要来看看】- 环球网校...
  6. 飞思卡尔单片机DZ60---时钟初始化
  7. 为什么大龄程序员不选择自己创业?
  8. Word双栏右对齐插入MathType公式
  9. 关于URL的转码与解码
  10. mysql什么情况用引号_sql中什么时候需要用双引号,什么时候用单引号;
  11. python个人博客搭建_Django后台 + Wordpress主题快速搭建个人博客
  12. 制作简单的指纹图集,并对其进行处理
  13. Windows11 dev 回退到beta
  14. 个人卖云服务器需要什么证,云服务器个人使用能做什么 云服务器要不要个人认证...
  15. SQL必知必会笔记(上)
  16. Elasticsearch 中的 Guice
  17. Electron-vue开发桌面应用调用TSCLIB.dll(tsc标签打印机连接库)
  18. 常用线缆用量计算公式大汇总
  19. 计算机的硬盘 u盘启动,【电脑bios设置硬盘启动】电脑bios设置光驱启动_电脑bios设置u盘启动...
  20. 平时小腿肌肉在跳动,是不是和肌肉萎缩有关?

热门文章

  1. [ASP.NET MVC3.0]Contact Manager 之迭代开发 一
  2. 关于android分辨率和使用iphone版切图
  3. Alt + sysrq + REISUB doesn't reboot my laptop
  4. 谷歌公布十大恶意网站 均曾攻击上万网站
  5. CCNA笔记-交换机安全
  6. resin3.1+apache2.2.8 整合(笔记)
  7. 基于SSM实现的奶茶店会员管理系统
  8. JSP实现医院住院管理系统
  9. Intellij IDEA设置运行时Java参数
  10. Invalid bound statement (not found): com.xsw.dao.CategoryDao.getCategoryById] with root cause