bool Tracking::NeedNewKeyFrame()

Purpose: decide whether a new keyframe is needed, i.e. implement the keyframe insertion criteria.
Steps

1. Enough frames must have passed since the last relocalization (more than mMaxFrames, roughly one second of input), unless the map still contains few keyframes (no more than mMaxFrames); otherwise no keyframe is created.

2. Enough frames must have passed since the last keyframe insertion (mMaxFrames), or at least mMinFrames with local mapping idle; otherwise no keyframe is created.

3. The current frame must track more than a minimum number of points (more than 15 inliers); otherwise no keyframe is created.

4. The current frame must track fewer points than a fraction of those tracked by the reference keyframe (0.9 for monocular, 0.75 for stereo/RGB-D), or map only a small fraction of its close points; otherwise no keyframe is created.

bool Tracking::NeedNewKeyFrame()
{
    // Step 1: if localization-only mode is selected in the viewer, never insert keyframes.
    // Inserting a keyframe creates MapPoints, so in this mode neither the point cloud nor the keyframes grow.
    if(mbOnlyTracking)
        return false;

    // If Local Mapping is freezed by a Loop Closure do not insert keyframes
    if(mpLocalMapper->isStopped() || mpLocalMapper->stopRequested())
        return false;

    const int nKFs = mpMap->KeyFramesInMap(); // number of keyframes in the map

    // Do not insert keyframes if not enough frames have passed from last relocalisation
    // Step 2: check whether it is too soon after the last relocalization.
    // mCurrentFrame.mnId is the id of the current frame, mnLastRelocFrameId is the id of the
    // last relocalization frame, and mMaxFrames equals the input frame rate.
    // A keyframe is considered only if the map still has few keyframes,
    // or more than 1 s (mMaxFrames frames) has passed since the last relocalization.
    if(mCurrentFrame.mnId<mnLastRelocFrameId+mMaxFrames && nKFs>mMaxFrames)
        return false;

    // Tracked MapPoints in the reference keyframe
    // Step 3: number of MapPoints tracked by the reference keyframe.
    // UpdateLocalKeyFrames() sets the keyframe with the highest covisibility as the reference keyframe.
    int nMinObs = 3;
    if(nKFs<=2)
        nMinObs=2;
    int nRefMatches = mpReferenceKF->TrackedMapPoints(nMinObs); // MapPoints tracked by the reference keyframe

    // Local Mapping accept keyframes?
    // Step 4: ask the local mapper whether it is busy.
    bool bLocalMappingIdle = mpLocalMapper->AcceptKeyFrames();

    // Stereo & RGB-D: Ratio of close "matches to map"/"total matches"
    // "total matches = matches to map + visual odometry matches"
    // Visual odometry matches will become MapPoints if we insert a keyframe.
    // This ratio measures how many MapPoints we could create if we insert a keyframe.
    // Step 5: for stereo/RGB-D, count the close points that could become MapPoints
    // and how many of them are already tracked in the map.
    int nMap = 0;   // matches already in the map
    int nTotal= 0;  // total close matches
    if(mSensor!=System::MONOCULAR) // stereo or RGB-D
    {
        for(int i =0; i<mCurrentFrame.N; i++) // iterate over all features of the current frame
        {
            if(mCurrentFrame.mvDepth[i]>0 && mCurrentFrame.mvDepth[i]<mThDepth) // depth within the close-point range
            {
                nTotal++; // this point could be turned into a MapPoint
                if(mCurrentFrame.mvpMapPoints[i])
                    if(mCurrentFrame.mvpMapPoints[i]->Observations()>0) // already observed by keyframes, i.e. already in the map
                        nMap++;
            }
        }
    }
    else
    {
        // There are no visual odometry matches in the monocular case
        nMap=1;
        nTotal=1;
    }

    const float ratioMap = (float)nMap/(float)(std::max(1,nTotal));

    // Step 6: decide whether to insert a keyframe.
    // Thresholds
    // Minimum ratio of current inliers w.r.t. the points tracked by the reference keyframe
    float thRefRatio = 0.75f;
    if(nKFs<2)
        thRefRatio = 0.4f; // with a single keyframe in the map the threshold is very permissive
    if(mSensor==System::MONOCULAR)
        thRefRatio = 0.9f;

    // Threshold on the ratio of close points already associated with the map
    float thMapRatio = 0.35f;
    if(mnMatchesInliers>300)
        thMapRatio = 0.20f;

    // Condition 1a: More than "MaxFrames" have passed from last keyframe insertion
    const bool c1a = mCurrentFrame.mnId>=mnLastKeyFrameId+mMaxFrames;
    // Condition 1b: More than "MinFrames" have passed and Local Mapping is idle
    const bool c1b = (mCurrentFrame.mnId>=mnLastKeyFrameId+mMinFrames && bLocalMappingIdle);
    // Condition 1c: tracking is weak (0.25 and 0.3 are fairly low thresholds)
    const bool c1c = mSensor!=System::MONOCULAR && (mnMatchesInliers<nRefMatches*0.25 || ratioMap<0.3f);
    // Condition 2: Few tracked points compared to reference keyframe. Lots of visual odometry compared to map matches.
    // Higher threshold than c1c: the overlap with the reference keyframe must not be too large.
    const bool c2 = ((mnMatchesInliers<nRefMatches*thRefRatio || ratioMap<thMapRatio) && mnMatchesInliers>15);

    if((c1a||c1b||c1c)&&c2)
    {
        // If the mapping accepts keyframes, insert keyframe.
        // Otherwise send a signal to interrupt BA
        if(bLocalMappingIdle)
        {
            return true;
        }
        else
        {
            mpLocalMapper->InterruptBA();
            if(mSensor!=System::MONOCULAR)
            {
                // Do not let too many keyframes pile up in the queue.
                // Tracking does not insert keyframes into the map directly: they are pushed to
                // mlNewKeyFrames first, and the local mapper pops them one by one into the map.
                if(mpLocalMapper->KeyframesInQueue()<3) // fewer than 3 keyframes waiting in the queue
                    return true;
                else
                    return false;
            }
            else
                return false;
        }
    }
    else
        return false;
}
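For context, a minimal sketch of where this decision is used in the main Track() loop (simplified and reconstructed from memory, not the verbatim source; the real function also updates the motion model and cleans temporal VO matches before this point):

// Sketch of the call site inside Tracking::Track(), assuming bOK holds the tracking result.
if(bOK && !mbOnlyTracking)
{
    // Check if we need to insert a new keyframe
    if(NeedNewKeyFrame())
        CreateNewKeyFrame();
}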

void Tracking::CreateNewKeyFrame()

Purpose: create a new keyframe.
Steps

1: Construct a keyframe from the current frame.

2: Set this keyframe as the reference keyframe of the current frame.

3: For stereo or RGB-D cameras, create new MapPoints for the current frame.

void Tracking::CreateNewKeyFrame()
{
    if(!mpLocalMapper->SetNotStop(true))
        return;

    // Step 1: construct a keyframe from the current frame
    KeyFrame* pKF = new KeyFrame(mCurrentFrame,mpMap,mpKeyFrameDB);

    // Step 2: set this keyframe as the reference keyframe of the current frame.
    // UpdateLocalKeyFrames() later sets the keyframe with the highest covisibility as the reference keyframe.
    mpReferenceKF = pKF;
    mCurrentFrame.mpReferenceKF = pKF;

    // This block mirrors the corresponding part of UpdateLastFrame().
    // Step 3: for stereo or RGB-D cameras, create new MapPoints for the current frame.
    if(mSensor!=System::MONOCULAR)
    {
        // Compute mRcw, mtcw, mRwc and mOw from Tcw
        mCurrentFrame.UpdatePoseMatrices();

        // We sort points by the measured depth by the stereo/RGBD sensor.
        // We create all those MapPoints whose depth < mThDepth.
        // If there are less than 100 close points we create the 100 closest.
        // Step 3.1: collect the features of the current frame whose depth is valid.
        vector<pair<float,int> > vDepthIdx;
        vDepthIdx.reserve(mCurrentFrame.N);
        for(int i=0; i<mCurrentFrame.N; i++)
        {
            float z = mCurrentFrame.mvDepth[i];
            if(z>0)
            {
                vDepthIdx.push_back(make_pair(z,i));
            }
        }

        if(!vDepthIdx.empty())
        {
            // Step 3.2: sort by depth, closest first
            sort(vDepthIdx.begin(),vDepthIdx.end());

            // Step 3.3: wrap the close points into MapPoints
            int nPoints = 0;
            for(size_t j=0; j<vDepthIdx.size();j++)
            {
                int i = vDepthIdx[j].second;

                bool bCreateNew = false;

                MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
                if(!pMP)
                    bCreateNew = true;
                else if(pMP->Observations()<1)
                {
                    bCreateNew = true;
                    mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint*>(NULL);
                }

                if(bCreateNew)
                {
                    cv::Mat x3D = mCurrentFrame.UnprojectStereo(i);
                    MapPoint* pNewMP = new MapPoint(x3D,pKF,mpMap);
                    // These attribute updates must be done every time a MapPoint is created
                    pNewMP->AddObservation(pKF,i);
                    pKF->AddMapPoint(pNewMP,i);
                    pNewMP->ComputeDistinctiveDescriptors();
                    pNewMP->UpdateNormalAndDepth();
                    mpMap->AddMapPoint(pNewMP);

                    mCurrentFrame.mvpMapPoints[i]=pNewMP;
                    nPoints++;
                }
                else
                {
                    nPoints++;
                }

                // These two thresholds control how dense the stereo/RGB-D point cloud becomes.
                // Changing them just to obtain a denser map is not recommended,
                // because these MapPoints take part in the rest of the SLAM pipeline.
                if(vDepthIdx[j].first>mThDepth && nPoints>100)
                    break;
            }
        }
    }

    mpLocalMapper->InsertKeyFrame(pKF);

    mpLocalMapper->SetNotStop(false);

    mnLastKeyFrameId = mCurrentFrame.mnId;
    mpLastKeyFrame = pKF;
}
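For readers who want the geometry behind UnprojectStereo(i): it back-projects keypoint i with the pinhole model using the depth measured by the stereo/RGB-D sensor, then moves the point into world coordinates with the current pose. A minimal sketch of that computation, using the public Frame accessors (illustrative, not the verbatim source):

// Back-project keypoint i of frame F into world coordinates (sketch of what
// Frame::UnprojectStereo(i) computes; assumes F.mvDepth[i] > 0 and that
// UpdatePoseMatrices() has already been called).
cv::Mat UnprojectToWorld(Frame &F, int i)
{
    const float z = F.mvDepth[i];           // depth measured by the stereo/RGB-D sensor
    const float u = F.mvKeysUn[i].pt.x;     // undistorted pixel coordinates
    const float v = F.mvKeysUn[i].pt.y;
    const float x = (u - Frame::cx) * z * Frame::invfx;   // pinhole back-projection
    const float y = (v - Frame::cy) * z * Frame::invfy;
    cv::Mat x3Dc = (cv::Mat_<float>(3,1) << x, y, z);     // point in the camera frame
    // Rwc * Xc + Ow moves the point into the world frame
    return F.GetRotationInverse()*x3Dc + F.GetCameraCenter();
}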

void Tracking::SearchLocalPoints()

Purpose: find the local MapPoints that lie inside the current frame's field of view and match them to the current frame's features by projection.
Steps

1: Iterate over the current frame's mvpMapPoints and flag them so that they are excluded from the search below.

2: Project all local MapPoints into the current frame and check whether they fall inside the field of view.

3: Match the MapPoints that are in view to the current frame's features with SearchByProjection.

void Tracking::SearchLocalPoints()
{
    // Do not search map points already matched
    // Step 1: iterate over the current frame's mvpMapPoints and flag them so they are excluded
    // from the search below, since by construction they are already visible in the current frame.
    for(vector<MapPoint*>::iterator vit=mCurrentFrame.mvpMapPoints.begin(), vend=mCurrentFrame.mvpMapPoints.end(); vit!=vend; vit++)
    {
        MapPoint* pMP = *vit;
        if(pMP)
        {
            if(pMP->isBad())
            {
                *vit = static_cast<MapPoint*>(NULL);
            }
            else
            {
                // Increase the counter of frames in which this point is visible
                pMP->IncreaseVisible();
                // Mark the point as seen by the current frame
                pMP->mnLastFrameSeen = mCurrentFrame.mnId;
                // Do not project this point later, it is already matched
                pMP->mbTrackInView = false;
            }
        }
    }

    int nToMatch=0;

    // Project points in frame and check its visibility
    // Step 2: project all local MapPoints into the current frame and check whether they are visible
    for(vector<MapPoint*>::iterator vit=mvpLocalMapPoints.begin(), vend=mvpLocalMapPoints.end(); vit!=vend; vit++)
    {
        MapPoint* pMP = *vit;
        // MapPoints already seen by the current frame do not need the visibility check
        if(pMP->mnLastFrameSeen == mCurrentFrame.mnId)
            continue;
        if(pMP->isBad())
            continue;

        // Project (this fills MapPoint variables for matching)
        // Step 2.1: check whether the local MapPoint lies inside the view frustum
        if(mCurrentFrame.isInFrustum(pMP,0.5))
        {
            // The point is potentially visible in this frame
            pMP->IncreaseVisible();
            // Only MapPoints inside the frustum take part in the projection matching
            nToMatch++;
        }
    }

    if(nToMatch>0)
    {
        ORBmatcher matcher(0.8);
        int th = 1;
        if(mSensor==System::RGBD)
            th=3;

        // If the camera has been relocalised recently, perform a coarser search with a larger threshold
        if(mCurrentFrame.mnId<mnLastRelocFrameId+2)
            th=5;

        // Step 2.2: match the visible MapPoints to the current frame's features by projection
        matcher.SearchByProjection(mCurrentFrame,mvpLocalMapPoints,th);
    }
}
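The 0.5 passed to isInFrustum is a lower bound on the cosine between the viewing ray and the MapPoint's mean viewing direction, so points seen more than about 60 degrees away from their usual viewing direction are rejected. A rough sketch of the checks performed, using only public Frame/MapPoint accessors (illustrative only; the real Frame::isInFrustum also fills the mTrackProj*/mnTrackScaleLevel/mTrackViewCos fields that SearchByProjection uses afterwards):

// Rough sketch of the visibility test (not the verbatim ORB-SLAM2 code).
bool IsRoughlyVisible(Frame &F, MapPoint* pMP, float viewingCosLimit)
{
    const cv::Mat P = pMP->GetWorldPos();

    // 1. Transform into camera coordinates and require positive depth.
    const cv::Mat Rcw = F.mTcw.rowRange(0,3).colRange(0,3);
    const cv::Mat tcw = F.mTcw.rowRange(0,3).col(3);
    const cv::Mat Pc  = Rcw*P + tcw;
    const float PcZ = Pc.at<float>(2);
    if(PcZ<0.0f)
        return false;

    // 2. Project and require the pixel to fall inside the undistorted image bounds.
    const float u = Frame::fx*Pc.at<float>(0)/PcZ + Frame::cx;
    const float v = Frame::fy*Pc.at<float>(1)/PcZ + Frame::cy;
    if(u<Frame::mnMinX || u>Frame::mnMaxX || v<Frame::mnMinY || v>Frame::mnMaxY)
        return false;

    // 3. Require the distance to the camera to lie in the point's scale-invariance range.
    const cv::Mat PO = P - F.GetCameraCenter();
    const float dist = cv::norm(PO);
    if(dist<pMP->GetMinDistanceInvariance() || dist>pMP->GetMaxDistanceInvariance())
        return false;

    // 4. Require the viewing direction to stay close to the point's mean viewing direction:
    //    cos(angle) must exceed viewingCosLimit (0.5 above, i.e. an angle below ~60 degrees).
    const cv::Mat Pn = pMP->GetNormal();
    if(PO.dot(Pn)/dist < viewingCosLimit)
        return false;

    return true;
}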
/**
 * @brief Update the local map
 *
 * The local map consists of:
 * - the K1 keyframes, their K2 neighbouring keyframes, and the reference keyframe
 * - the MapPoints observed by these keyframes
 */
void Tracking::UpdateLocalMap()
{
    // This is for visualization
    // (it might be better to place this line after UpdateLocalPoints())
    mpMap->SetReferenceMapPoints(mvpLocalMapPoints);

    // Update local keyframes and local MapPoints
    UpdateLocalKeyFrames();
    UpdateLocalPoints();
}
/**
 * @brief Update the local MapPoints, called by UpdateLocalMap()
 *
 * Collect the MapPoints of the local keyframes mvpLocalKeyFrames into mvpLocalMapPoints.
 */
void Tracking::UpdateLocalPoints()
{
    // Step 1: clear the local MapPoints
    mvpLocalMapPoints.clear();

    // Step 2: iterate over the local keyframes mvpLocalKeyFrames
    for(vector<KeyFrame*>::const_iterator itKF=mvpLocalKeyFrames.begin(), itEndKF=mvpLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
    {
        KeyFrame* pKF = *itKF;
        const vector<MapPoint*> vpMPs = pKF->GetMapPointMatches();

        // Add the MapPoints of each local keyframe to mvpLocalMapPoints
        for(vector<MapPoint*>::const_iterator itMP=vpMPs.begin(), itEndMP=vpMPs.end(); itMP!=itEndMP; itMP++)
        {
            MapPoint* pMP = *itMP;
            if(!pMP)
                continue;
            // mnTrackReferenceForFrame prevents adding the same local MapPoint twice
            if(pMP->mnTrackReferenceForFrame==mCurrentFrame.mnId)
                continue;
            if(!pMP->isBad())
            {
                mvpLocalMapPoints.push_back(pMP);
                pMP->mnTrackReferenceForFrame=mCurrentFrame.mnId;
            }
        }
    }
}
/**
 * @brief Update the local keyframes, called by UpdateLocalMap()
 *
 * Iterate over the MapPoints of the current frame, collect the keyframes that observe them
 * together with their neighbouring keyframes, and update mvpLocalKeyFrames.
 */
void Tracking::UpdateLocalKeyFrames()
{
    // Each map point vote for the keyframes in which it has been observed
    // Step 1: iterate over the current frame's MapPoints and record every keyframe that observes them
    map<KeyFrame*,int> keyframeCounter;
    for(int i=0; i<mCurrentFrame.N; i++)
    {
        if(mCurrentFrame.mvpMapPoints[i])
        {
            MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
            if(!pMP->isBad())
            {
                // Keyframes that observe this MapPoint of the current frame
                const map<KeyFrame*,size_t> observations = pMP->GetObservations();
                for(map<KeyFrame*,size_t>::const_iterator it=observations.begin(), itend=observations.end(); it!=itend; it++)
                    keyframeCounter[it->first]++;
            }
            else
            {
                mCurrentFrame.mvpMapPoints[i]=NULL;
            }
        }
    }

    if(keyframeCounter.empty())
        return;

    int max=0;
    KeyFrame* pKFmax= static_cast<KeyFrame*>(NULL);

    // Step 2: update the local keyframes (mvpLocalKeyFrames); keyframes are added with three strategies
    // Clear the local keyframes first
    mvpLocalKeyFrames.clear();
    mvpLocalKeyFrames.reserve(3*keyframeCounter.size());

    // All keyframes that observe a map point are included in the local map. Also check which keyframe shares most points
    // V-D K1: shares the map points with current frame
    // Strategy 1: every keyframe that observes a MapPoint of the current frame becomes a local keyframe
    for(map<KeyFrame*,int>::const_iterator it=keyframeCounter.begin(), itEnd=keyframeCounter.end(); it!=itEnd; it++)
    {
        KeyFrame* pKF = it->first;

        if(pKF->isBad())
            continue;

        if(it->second>max)
        {
            max=it->second;
            pKFmax=pKF;
        }

        mvpLocalKeyFrames.push_back(it->first);
        // mnTrackReferenceForFrame prevents adding the same local keyframe twice
        pKF->mnTrackReferenceForFrame = mCurrentFrame.mnId;
    }

    // Include also some not-already-included keyframes that are neighbors to already-included keyframes
    // V-D K2: neighbors to K1 in the covisibility graph
    // Strategy 2: keyframes highly covisible with those found by strategy 1 also become local keyframes
    for(vector<KeyFrame*>::const_iterator itKF=mvpLocalKeyFrames.begin(), itEndKF=mvpLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
    {
        // Limit the number of keyframes
        if(mvpLocalKeyFrames.size()>80)
            break;

        KeyFrame* pKF = *itKF;

        // Strategy 2.1: the 10 best covisible keyframes
        const vector<KeyFrame*> vNeighs = pKF->GetBestCovisibilityKeyFrames(10);
        for(vector<KeyFrame*>::const_iterator itNeighKF=vNeighs.begin(), itEndNeighKF=vNeighs.end(); itNeighKF!=itEndNeighKF; itNeighKF++)
        {
            KeyFrame* pNeighKF = *itNeighKF;
            if(!pNeighKF->isBad())
            {
                // mnTrackReferenceForFrame prevents adding the same local keyframe twice
                if(pNeighKF->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
                {
                    mvpLocalKeyFrames.push_back(pNeighKF);
                    pNeighKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
                    break;
                }
            }
        }

        // Strategy 2.2: its child keyframes in the spanning tree
        const set<KeyFrame*> spChilds = pKF->GetChilds();
        for(set<KeyFrame*>::const_iterator sit=spChilds.begin(), send=spChilds.end(); sit!=send; sit++)
        {
            KeyFrame* pChildKF = *sit;
            if(!pChildKF->isBad())
            {
                if(pChildKF->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
                {
                    mvpLocalKeyFrames.push_back(pChildKF);
                    pChildKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
                    break;
                }
            }
        }

        // Strategy 2.3: its parent keyframe
        KeyFrame* pParent = pKF->GetParent();
        if(pParent)
        {
            // mnTrackReferenceForFrame prevents adding the same local keyframe twice
            if(pParent->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
            {
                mvpLocalKeyFrames.push_back(pParent);
                pParent->mnTrackReferenceForFrame=mCurrentFrame.mnId;
                break;
            }
        }
    }

    // V-D Kref: shares the most map points with current frame
    // Step 3: the keyframe sharing the most MapPoints with the current frame becomes its reference keyframe
    if(pKFmax)
    {
        mpReferenceKF = pKFmax;
        mCurrentFrame.mpReferenceKF = mpReferenceKF;
    }
}
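These three functions are driven by Tracking::TrackLocalMap(). The outline below, reconstructed from memory and simplified (the real function also updates per-MapPoint Found statistics and handles stereo outliers), shows how they fit together:

// Outline of Tracking::TrackLocalMap(); treat it as a sketch, not the verbatim source.
bool Tracking::TrackLocalMap()
{
    UpdateLocalMap();       // UpdateLocalKeyFrames() + UpdateLocalPoints()
    SearchLocalPoints();    // project local MapPoints and match them to the current frame

    // Refine the camera pose with all matches found so far.
    Optimizer::PoseOptimization(&mCurrentFrame);

    // Count the matches that survived the optimization as inliers.
    mnMatchesInliers = 0;
    for(int i=0; i<mCurrentFrame.N; i++)
        if(mCurrentFrame.mvpMapPoints[i] && !mCurrentFrame.mvbOutlier[i])
            mnMatchesInliers++;

    // Be more demanding shortly after a relocalization.
    if(mCurrentFrame.mnId<mnLastRelocFrameId+mMaxFrames && mnMatchesInliers<50)
        return false;

    return mnMatchesInliers>=30;
}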

bool Tracking::Relocalization()

Purpose: relocalization. Search previous keyframes for candidates that share enough matches with the current frame, then estimate the pose with PnP inside RANSAC iterations.
Steps

1. Compute the BoW vector of the current frame and query the keyframe database for candidate keyframes.

2. Build a PnP solver for each candidate, discard bad candidates, and prepare the MapPoint matches between each candidate keyframe and the current frame.

3. Estimate the pose with the PnP solver, running several P4P RANSAC iterations followed by nonlinear pose optimization, until a pose supported by enough inliers is found.

4. Return success or failure.

bool Tracking::Relocalization()
{
    // Compute Bag of Words Vector
    // Step 1: compute the BoW representation of the current frame's features
    mCurrentFrame.ComputeBoW();

    // Relocalization is performed when tracking is lost
    // Track Lost: Query KeyFrame Database for keyframe candidates for relocalisation
    // Step 2: retrieve candidate keyframes similar to the current frame
    vector<KeyFrame*> vpCandidateKFs = mpKeyFrameDB->DetectRelocalizationCandidates(&mCurrentFrame);

    if(vpCandidateKFs.empty()) // no candidates found: relocalization fails
        return false;

    const int nKFs = vpCandidateKFs.size(); // number of candidate keyframes

    // We perform first an ORB matching with each candidate
    // If enough matches are found we setup a PnP solver
    ORBmatcher matcher(0.75,true);

    vector<PnPsolver*> vpPnPsolvers;
    vpPnPsolvers.resize(nKFs);

    vector<vector<MapPoint*> > vvpMapPointMatches;
    vvpMapPointMatches.resize(nKFs);

    vector<bool> vbDiscarded;
    vbDiscarded.resize(nKFs);

    int nCandidates=0;

    for(int i=0; i<nKFs; i++)
    {
        KeyFrame* pKF = vpCandidateKFs[i];
        if(pKF->isBad())
            vbDiscarded[i] = true; // discard bad candidate keyframes
        else
        {
            // Step 3: match against the candidate through the BoW vocabulary
            int nmatches = matcher.SearchByBoW(pKF,mCurrentFrame,vvpMapPointMatches[i]);
            if(nmatches<15) // fewer than 15 matches: discard the candidate
            {
                vbDiscarded[i] = true;
                continue;
            }
            else // set up a PnP solver for this candidate
            {
                PnPsolver* pSolver = new PnPsolver(mCurrentFrame,vvpMapPointMatches[i]);
                pSolver->SetRansacParameters(0.99,10,300,4,0.5,5.991);
                vpPnPsolvers[i] = pSolver;
                nCandidates++;
            }
        }
    }

    // Alternatively perform some iterations of P4P RANSAC
    // Until we found a camera pose supported by enough inliers
    bool bMatch = false;
    ORBmatcher matcher2(0.9,true);

    while(nCandidates>0 && !bMatch)
    {
        for(int i=0; i<nKFs; i++)
        {
            if(vbDiscarded[i])
                continue;

            // Perform 5 Ransac Iterations
            vector<bool> vbInliers;
            int nInliers;
            bool bNoMore;

            // Step 4: estimate the pose with the EPnP solver
            PnPsolver* pSolver = vpPnPsolvers[i];
            cv::Mat Tcw = pSolver->iterate(5,bNoMore,vbInliers,nInliers);

            // If Ransac reachs max. iterations discard keyframe
            if(bNoMore)
            {
                vbDiscarded[i]=true;
                nCandidates--;
            }

            // If a Camera Pose is computed, optimize
            if(!Tcw.empty())
            {
                Tcw.copyTo(mCurrentFrame.mTcw);

                set<MapPoint*> sFound;

                const int np = vbInliers.size(); // number of match slots (size of vbInliers)
                for(int j=0; j<np; j++)
                {
                    if(vbInliers[j])
                    {
                        mCurrentFrame.mvpMapPoints[j]=vvpMapPointMatches[i][j];
                        sFound.insert(vvpMapPointMatches[i][j]);
                    }
                    else
                        mCurrentFrame.mvpMapPoints[j]=NULL;
                }

                // Step 5: refine the pose with PoseOptimization
                int nGood = Optimizer::PoseOptimization(&mCurrentFrame);

                if(nGood<10)
                    continue;

                for(int io =0; io<mCurrentFrame.N; io++)
                    if(mCurrentFrame.mvbOutlier[io])
                        mCurrentFrame.mvpMapPoints[io]=static_cast<MapPoint*>(NULL);

                // If few inliers, search by projection in a coarse window and optimize again
                // Step 6: if there are few inliers, match the remaining points by projection and optimize again
                if(nGood<50)
                {
                    int nadditional =matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,10,100);

                    if(nadditional+nGood>=50)
                    {
                        nGood = Optimizer::PoseOptimization(&mCurrentFrame);

                        // If many inliers but still not enough, search by projection again in a narrower window
                        // the camera has been already optimized with many points
                        if(nGood>30 && nGood<50)
                        {
                            sFound.clear();
                            for(int ip =0; ip<mCurrentFrame.N; ip++)
                                if(mCurrentFrame.mvpMapPoints[ip])
                                    sFound.insert(mCurrentFrame.mvpMapPoints[ip]);
                            nadditional =matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,3,64);

                            // Final optimization
                            if(nGood+nadditional>=50)
                            {
                                nGood = Optimizer::PoseOptimization(&mCurrentFrame);

                                for(int io =0; io<mCurrentFrame.N; io++)
                                    if(mCurrentFrame.mvbOutlier[io])
                                        mCurrentFrame.mvpMapPoints[io]=NULL;
                            }
                        }
                    }
                }

                // If the pose is supported by enough inliers stop ransacs and continue
                if(nGood>=50)
                {
                    bMatch = true;
                    break;
                }
            }
        }
    }

    if(!bMatch)
    {
        return false;
    }
    else
    {
        mnLastRelocFrameId = mCurrentFrame.mnId;
        return true;
    }
}
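The magic numbers passed to the solver above correspond, as far as I recall the PnPsolver header, to SetRansacParameters(probability, minInliers, maxIterations, minSet, epsilon, th2). An annotated restatement of the call, with that assumption made explicit:

// Annotated version of the call made for each relocalization candidate
// (parameter names assumed from PnPsolver.h).
pSolver->SetRansacParameters(
    0.99,   // probability: desired confidence of drawing an all-inlier minimal set
    10,     // minInliers: minimum number of inliers for a hypothesis to be accepted
    300,    // maxIterations: cap on RANSAC iterations across all iterate() calls
    4,      // minSet: size of the minimal sample used by EPnP (hence "P4P")
    0.5,    // epsilon: assumed inlier ratio, used to adapt the number of iterations
    5.991); // th2: chi-square threshold (2 DoF, 95%) on the squared reprojection error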
