打开摄像头,位置:tracker_run.cpp

bool TrackerRun::init()
{
    ImgAcqParas imgAcqParas;
    // NOTE(review): device index is hard-coded to 1; the commented-out code shows it
    // was meant to come from _paras.device — confirm this override is intentional.
    imgAcqParas.device =1; //_paras.device;
    imgAcqParas.expansionStr = _paras.expansion;

...}

dsst_tracker.hpp中引用了gradientMex

//#include "gradientMex.hpp"

gradientMex.hpp中定义了两种fhog方法,一种是转置的,一种是直接计算的,

namespace piotr {
    // fhog: computes Felzenszwalb-style HOG features from the precomputed
    // gradient magnitude (M) and orientation (O) arrays (declaration truncated in this excerpt).
    void fhog(float * const M, float * const O,

...}

fhogToCvCol 函数指针的赋值(选择 fhog 实现),位于 scale_estimator.hpp:

if (paras.useFhogTranspose){
                // select the transposed fhog variant for scale features
                fhogToCvCol = &piotr::fhogToCvColT;
             printf("useFhogTranspose \n");
            }
            else{
                // default: non-transposed variant
                fhogToCvCol = &piotr::fhogToCol;
                 printf("useFhogTranspose no \n");
            }

cvFhog 函数指针的赋值(选择 fhog 实现),位于 dsst_tracker.hpp:

if (paras.useFhogTranspose)
                // transposed variant of the template instantiation
                cvFhog = &piotr::cvFhogT < T, DFC > ;
            else
                // default: non-transposed variant
                cvFhog = &piotr::cvFhog < T, DFC > ;

跟踪流程:

  bool updateAtScalePos(const cv::Mat& image, const Point& oldPos, const T oldScale,
            Rect& boundingBox)
        {
            // Runs one tracking iteration starting from (oldPos, oldScale).
            // On success, boundingBox holds the new target box clipped to the image
            // area and _lastBoundingBox caches the unclipped box; on failure,
            // boundingBox is left at the last known box and false is returned.
            ++_frameIdx;
            if (!_isInitialized)
                return false;
            T newScale = oldScale;
            Point newPos = oldPos;
            cv::Point2i maxResponseIdx;
            cv::Mat response;
            // in case of error return the last box
            boundingBox = _lastBoundingBox;
            if (detectModel(image, response, maxResponseIdx, newPos, newScale) == false)
                return false;
            // build the new box centered on the detected position at the detected scale
            Rect tempBoundingBox;
            tempBoundingBox.width = _baseTargetSz.width * newScale;
            tempBoundingBox.height = _baseTargetSz.height * newScale;
            tempBoundingBox.x = newPos.x - tempBoundingBox.width / 2;
            tempBoundingBox.y = newPos.y - tempBoundingBox.height / 2;
            if (_ENABLE_TRACKING_LOSS_DETECTION)
            {
                if (evalReponse(image, response, maxResponseIdx,
                    tempBoundingBox) == false)
                    return false;
            }
            if (updateModel(image, newPos, newScale) == false)
                return false;
            // BUG FIX: the original clipped boundingBox to the image rect first and
            // then overwrote it with tempBoundingBox, so the clipping was a dead
            // store. Assign first, then clip, so the returned box never extends
            // outside the image.
            boundingBox = tempBoundingBox;
            boundingBox &= Rect(0, 0, static_cast<T>(image.cols), static_cast<T>(image.rows));
            _lastBoundingBox = tempBoundingBox;
            return true;
        }
 bool detectModel(const cv::Mat& image, cv::Mat& response,
            cv::Point2i& maxResponseIdx, Point& newPos,
            T& newScale) const
        {            // find translation:
            // extract fhog features of the current frame at the previous position/scale
            std::shared_ptr<DFC> xt(0);
            if (getTranslationFeatures(image, xt, newPos, newScale) == false)
                return false;
            // transform features to the Fourier domain (CCS-packed or full complex)
            std::shared_ptr<DFC> xtf;
            if (_USE_CCS)
                xtf = DFC::dftFeatures(xt);
            else
                xtf = DFC::dftFeatures(xt, cv::DFT_COMPLEX_OUTPUT);
            // passing DFT_COMPLEX_OUTPUT to dft() yields the full complex matrix directly
            // correlation filter response: numerator * features, summed over channels,
            // divided by (denominator + lambda)
            std::shared_ptr<DFC> sampleSpec = DFC::mulSpectrumsFeatures(_hfNumerator, xtf, false);
            cv::Mat sumXtf = DFC::sumFeatures(sampleSpec);
            cv::Mat hfDenLambda = addRealToSpectrum<T>(_LAMBDA, _hfDenominator);
            cv::Mat responseTf;
            if (_USE_CCS)
                divSpectrums(sumXtf, hfDenLambda, responseTf, 0, false);
            else
                divideSpectrumsNoCcs<T>(sumXtf, hfDenLambda, responseTf);
            // back to the spatial domain; the peak of the response is the translation
            cv::Mat translationResponse;
            idft(responseTf, translationResponse, cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
            cv::Point delta;
            double maxResponse;
            cv::Point_<T> subDelta;
            minMaxLoc(translationResponse, 0, &maxResponse, 0, &delta);
            subDelta = delta;
            // refine the peak to sub-pixel accuracy when features are cell-binned
            if (_CELL_SIZE != 1)
                subDelta = subPixelDelta<T>(translationResponse, delta);
            // convert response-map displacement (relative to its center) back to
            // image coordinates: scale by the current scale and the fhog cell size
            T posDeltaX = (subDelta.x + 1 - floor(translationResponse.cols / consts::c2_0)) * newScale;
            T posDeltaY = (subDelta.y + 1 - floor(translationResponse.rows / consts::c2_0)) * newScale;
            newPos.x += round(posDeltaX * _CELL_SIZE);
            newPos.y += round(posDeltaY * _CELL_SIZE);
            if (_debug != 0)
                _debug->showResponse(translationResponse, maxResponse);
            if (_scaleEstimator)
            {                // find scale at the updated position; the scale filter works on
                // the template-scaled target, hence the _templateScaleFactor round trip
                T tempScale = newScale * _templateScaleFactor;
                if (_scaleEstimator->detectScale(image, newPos,
                    tempScale) == false)
                    return false;
                newScale = tempScale / _templateScaleFactor;
            }
            response = translationResponse;
            maxResponseIdx = delta;
            return true;
        }
检测特征,fhog特征:
  bool getTranslationFeatures(const cv::Mat& image, std::shared_ptr<DFC>& features,
            const Point& pos, T scale) const
        {            // Extracts the translation-filter feature channels (fhog + gray)
            // from the patch centered at pos, scaled to the template size and
            // multiplied by the cosine window.
            cv::Mat patch;
            Size patchSize = _templateSz * scale;
            if (getSubWindow(image, patch, patchSize, pos) == false)
                return false;
            if (_ORIGINAL_VERSION)
                depResize(patch, patch, _templateSz);
            else
                resize(patch, patch, _templateSz, 0, 0, _RESIZE_TYPE);
            if (_debug != 0)
                _debug->showPatch(patch);
            cv::Mat floatPatch;
            // convertTo keeps the source channel count; only the depth becomes CV_32F.
            // NOTE(review): cvFhog below unpacks 3 interleaved channels — for a
            // 1-channel input patch this assumption should be verified.
            patch.convertTo(floatPatch, CV_32FC(3));
            features.reset(new DFC());
            // fill all channels except the last with fhog features
            cvFhog(floatPatch, features, _CELL_SIZE, DFC::numberOfChannels() - 1);
            // append gray-scale image as the last channel, normalized to [-0.5, 0.5]
            if (patch.channels() == 1)
            {                if (_CELL_SIZE != 1)
                    resize(patch, patch, features->channels[0].size(), 0, 0, _RESIZE_TYPE);
                features->channels[DFC::numberOfChannels() - 1] = patch / 255.0 - 0.5;
            }
            else
            {                if (_CELL_SIZE != 1)
                    resize(patch, patch, features->channels[0].size(), 0, 0, _RESIZE_TYPE);
                cv::Mat grayFrame;
                cvtColor(patch, grayFrame, cv::COLOR_BGR2GRAY);
                grayFrame.convertTo(grayFrame, CV_TYPE);
                grayFrame = grayFrame / 255.0 - 0.5;
                features->channels[DFC::numberOfChannels() - 1] = grayFrame;
            }
            // apply the cosine (Hann) window to reduce boundary effects
            DFC::mulFeatures(features, _cosWindow);
            return true;
        }
计算cvFhog:
 void cvFhog(const cv::Mat& img, std::shared_ptr<OUT>& cvFeatures, int binSize, int fhogChannelsToCopy = 31)
    {
        // Computes Piotr Dollar's fhog features for a CV_32F image and stores up to
        // fhogChannelsToCopy channels (capped by OUT::numberOfChannels()) in
        // cvFeatures as row-major cv::Mat planes of size (height/binSize x width/binSize).
        // The piotr gradient code works on column-major planar data, so the image is
        // converted on the way in and the result transposed back on the way out.
        const int orientations = 9;
        // ensure array is continuous
        const cv::Mat& image = (img.isContinuous() ? img : img.clone());
        int channels = image.channels();
        int computeChannels = 32;
        int width = image.cols;
        int height = image.rows;
        int widthBin = width / binSize;
        int heightBin = height / binSize;
        // the raw-pointer unpack below requires float data and 1 or 3 channels
        CV_Assert(image.depth() == CV_32F);
        CV_Assert(channels == 1 || channels == 3);
        float* const I = (float*)wrCalloc(static_cast<size_t>(width * height * channels), sizeof(float));
        float* const H = (float*)wrCalloc(static_cast<size_t>(widthBin * heightBin * computeChannels), sizeof(float));
        float* const M = (float*)wrCalloc(static_cast<size_t>(width * height), sizeof(float));
        float* const O = (float*)wrCalloc(static_cast<size_t>(width * height), sizeof(float));
        float* imageData = reinterpret_cast<float*>(image.data);
        if (channels == 3)
        {
            // row major (interleaved BGR) to col major planar (R, G, B)
            float* const redChannel = I;
            float* const greenChannel = I + width * height;
            float* const blueChannel = I + 2 * width * height;
            for (int row = 0; row < height; ++row)
            {
                for (int col = 0; col < width; ++col)
                {
                    int colMajorPos = col * height + row;
                    int rowMajorPos = row * channels * width + col * channels;
                    blueChannel[colMajorPos] = imageData[rowMajorPos];
                    greenChannel[colMajorPos] = imageData[rowMajorPos + 1];
                    redChannel[colMajorPos] = imageData[rowMajorPos + 2];
                }
            }
        }
        else
        {
            // BUG FIX: the original read imageData[rowMajorPos + 1] and + 2
            // unconditionally, which is out of bounds for 1-channel input.
            // Single channel only needs a row-major -> col-major transpose.
            for (int row = 0; row < height; ++row)
                for (int col = 0; col < width; ++col)
                    I[col * height + row] = imageData[row * width + col];
        }
        // calc fhog in col major
        gradMag(I, M, O, height, width, channels, true);
        if (fhogChannelsToCopy == 27)
            fhog(M, O, H, height, width, binSize, orientations, -1, 0.2f, false);
        else
            fhog(M, O, H, height, width, binSize, orientations, -1, 0.2f);
        // only copy the amount of the channels the user wants
        // or the amount that fits into the output array
        int channelsToCopy = std::min(fhogChannelsToCopy, OUT::numberOfChannels());
        for (int c = 0; c < channelsToCopy; ++c)
        {
            cv::Mat_<PRIMITIVE_TYPE> m(heightBin, widthBin);
            cvFeatures->channels[c] = m;
        }
        PRIMITIVE_TYPE* cdata = 0;
        // col major to row major with separate channels
        for (int c = 0; c < channelsToCopy; ++c)
        {
            float* Hc = H + widthBin * heightBin * c;
            cdata = reinterpret_cast<PRIMITIVE_TYPE*>(cvFeatures->channels[c].data);
            for (int row = 0; row < heightBin; ++row)
                for (int col = 0; col < widthBin; ++col)
                    cdata[row * widthBin + col] = Hc[row + heightBin * col];
        }
        wrFree(M);
        wrFree(O);
        wrFree(I);
        wrFree(H);
    }
检测目标并更新:
  else
        {            tStart = getTickCount();
            // run one tracker iteration; _targetOnFrame records whether the
            // target was found, tDuration the time spent (in ticks)
            _targetOnFrame = _tracker->update(_image, _boundingBox);
            tDuration = getTickCount() - tStart;
        }

fhog调用流程:获取转移(平移/位置)特征

getTranslationFeatures → cvFhog → piotr::fhog(gradientMex 中实现)

多尺度估计:

getScaleFeatures 在 for 循环中对每个尺度调用 fhogToCvCol(或转置版本 fhogToCvColT)→ fhogToCol → piotr::fhog

dsst跟踪算法源码分析相关推荐

  1. [unity3d]recast navigation navmesh 导航网格 寻路算法 源码分析

    recast navigation navmesh导航网格算法源码分析 Author:  林绍川 recast navigation navmesh是unity3d ue4内置的寻路算法 本文为了方便 ...

  2. rvo动态避障算法源码分析

    rvo动态避障算法 源码: snape (Jamie Snape) · GitHub 文档: RVO2 Library - Reciprocal Collision Avoidance for Rea ...

  3. 【机器学习经典算法源码分析系列】-- 逻辑回归

    1.逻辑回归(Logistic Regression)又常被成为"逻辑斯蒂回归",实质上是一个二元分类问题. 逻辑回归代价函数: 代价函数导数: Matlab实现: 采用matla ...

  4. Ribbon源码3-负载均衡算法源码分析

    0. 环境 nacos版本:1.4.1 Spring Cloud : Hoxton.SR9 Spring Boot :2.4.4 Spring Cloud alibaba: 2.2.5.RELEASE ...

  5. OpenCV人脸识别Eigen算法源码分析

    1 理论基础 学习Eigen人脸识别算法需要了解一下它用到的几个理论基础,现总结如下: 1.1 协方差矩阵 首先需要了解一下公式: 共公式可以看出:均值描述的是样本集合的平均值,而标准差描述的则是样本 ...

  6. MedianFlow中值流跟踪算法源码

    一.MedianFlow算法简介 该算法属于TLD跟踪算法中的Tracking部分.TLD算法可参考:Tracking-Learning-Detection原理分析.它基于LK光流跟踪算法,并使用FB ...

  7. OpenCV人脸识别LBPH算法源码分析

    1 背景及理论基础 人脸识别是指将一个需要识别的人脸和人脸库中的某个人脸对应起来(类似于指纹识别),目的是完成识别功能,该术语需要和人脸检测进行区分,人脸检测是在一张图片中把人脸定位出来,完成的是搜寻 ...

  8. 以太坊Ethash算法源码分析

    Ethash是以太坊目前使用的共识算法,其前身是Dagger-Hashimoto算法,但是进行了很大的改动. 1. Dagger-Hashimoto Dagger-Hashimoto算法想要达到以下几 ...

  9. AdaBoost算法源码分析

    基本的理论知识可以参考李航的统计学习和西瓜书,在这里简单介绍: bagging:基于数据随机抽样的分类器,更先进的bagging方法有随机森林等. boosting:是一种与bagging类似的技术, ...

最新文章

  1. 移动端图片上传旋转、压缩的解决方案
  2. Java序列化闲聊:序列化和Json
  3. 028_CSS外边距
  4. 解决Windows 2003中不允许的父路径Active Server Pages错误'ASP 0131'的方法
  5. 按键精灵易语言c,求助(把按键精灵的源码转为易语言的)
  6. tomcat 开启远程debug
  7. 转: 微博的多机房部署的实践(from infoq)
  8. [HTB]“Heist”靶机渗透详细思路
  9. OpenCV手部关键点检测(手势识别)代码示例
  10. LINUX的DNS怎么设置?linux下如何修改DNS地址
  11. SQL Server 数据库维护计划
  12. 差分放大器低通滤波器设计
  13. 程序员的8条解bug(甩锅)大招!
  14. oracle 拉里 网线通道,拉里.埃里森:Oracle云计算服务进入超速增长阶段
  15. 每日股市大盘自动复盘(基于聚宽量化投资平台)
  16. XML编程经验――LIBXML2库使用指南
  17. 区块链技术有什么影响?
  18. 首都师范大学计算机考研调剂,首都师范大学2018年考研调剂信息公布
  19. ssh免密登录和阿里云epel安装
  20. 【有毒的设计模式】工厂方法模式

热门文章

  1. objdump反汇编用法示例
  2. maven生成jar包,包含第三方jar包
  3. Linux监听请求到达时间,4: zabbix5.0自动发现网站域名并监控访问状态和请求时间...
  4. 盲人计算机培训计划,中国盲人协会-河南省郑州市盲人按摩培训班教学大纲及计划...
  5. mule_Mule ESB,ActiveMQ和DLQ
  6. android studio zbar,Android Studio 0.2.6和ZBar项目设置
  7. php标题 栏目不显示title,DEDECMS专题列表页不显示title的解决方法
  8. IDEA插件推荐:中文字符自动转化!
  9. 聊一聊:开源社区应该用中文吗?
  10. 醉酒删库:几杯红酒下肚,7小时数据消失...