The code below computes the per-class AP and the overall mAP for a PASCAL-VOC-style detection task.

# --------------------------------------------------------
# YOLOv4
# 2020.11.05
# --------------------------------------------------------
from __future__ import print_function
import argparse
import xml.etree.ElementTree as ET
import os,sys
import pickle
import numpy as np
import pdb


def parse_args():
    """Parse input arguments"""
    parser = argparse.ArgumentParser(description='Re-evaluate results')
    parser.add_argument('output_dir', nargs=1, help='results directory', type=str)
    parser.add_argument('--voc_dir', dest='voc_dir', default='/home/sxl/Data/voc/VOCtrainval/', type=str)
    parser.add_argument('--year', dest='year', default='2017', type=str)
    parser.add_argument('--image_set', dest='image_set', default='voc_test', type=str)
    parser.add_argument('--classes', dest='class_file', default='/home/sxl/Module/DarknetAB1022/data/voc.names', type=str)
    parser.add_argument('--ovthresh', dest='ovthresh', default=0.5, type=float)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    return args


def get_voc_results_file_template(image_set, out_dir='results'):
    # filename = 'comp4_det_' + image_set + '_{:s}.txt'
    # filename = image_set + '_{:s}.txt'
    filename = '{:s}.txt'
    path = os.path.join(out_dir, filename)
    return path


def parse_rec(filename):
    """Parse a PASCAL VOC xml file"""
    tree = ET.parse(filename)
    objects = []
    for obj in tree.findall('object'):
        obj_struct = {}
        obj_struct['name'] = obj.find('name').text
        obj_struct['pose'] = obj.find('pose').text
        obj_struct['truncated'] = int(obj.find('truncated').text)
        obj_struct['difficult'] = int(obj.find('difficult').text)
        bbox = obj.find('bndbox')
        obj_struct['bbox'] = [int(bbox.find('xmin').text),
                              int(bbox.find('ymin').text),
                              int(bbox.find('xmax').text),
                              int(bbox.find('ymax').text)]
        objects.append(obj_struct)
    return objects


def voc_ap(rec, prec, use_07_metric=False):
    """ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11-point method (default: False).
    """
    # VOC changed its evaluation protocol after 2010; decide here which method to use
    if use_07_metric:
        # 11-point metric: VOC 2007 interpolates precision at 11 evenly spaced recall thresholds
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                # take the maximum precision among points whose recall is at least t
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.  # average the 11 interpolated precision values
    else:
        # post-2010 method: integrate precision over all distinct recall values
        # correct AP calculation
        # first append sentinel values at the end:
        # recall is padded with 0 and 1, precision with 0 at both ends
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope: sweep from the end so that
        # precision becomes monotonically non-increasing
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under the PR curve, look for points
        # where the X axis (recall) changes value; the offset comparison
        # returns exactly those indices
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec: precision weighted by the width
        # of each recall interval
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap


def voc_eval(detpath, annopath, imagesetfile, classname, cachedir, ovthresh, use_07_metric=False):
    """rec, prec, ap = voc_eval(detpath,
                                annopath,
                                imagesetfile,
                                classname,
                                [ovthresh],
                                [use_07_metric])

    Top level function that does the PASCAL VOC evaluation.

    detpath: Path to detections
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations
        annopath.format(imagename) should be the xml annotations file.
    imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name (duh)
    cachedir: Directory for caching the annotations
    [ovthresh]: Overlap threshold (default = 0.5)
    [use_07_metric]: Whether to use VOC07's 11 point AP computation
        (default False)
    """
    # assumes detections are in detpath.format(classname)
    # assumes annotations are in annopath.format(imagename)
    # assumes imagesetfile is a text file with each line an image name
    # cachedir caches the annotations in a pickle file

    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]
    # pdb.set_trace()
    if not os.path.isfile(cachefile):
        # load annots
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath.format(imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(i + 1, len(imagenames)))
        # save
        print('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            pickle.dump(recs, f)
    else:
        # load
        with open(cachefile, 'rb') as f:
            recs = pickle.load(f)

    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        npos = npos + sum(~difficult)  # npos = TP + FN
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # read dets
    detfile = detpath.format(classname)
    # pdb.set_trace()
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # sort by confidence
    sorted_ind = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]

    # go down dets and mark TPs and FPs
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        R = class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)

        if BBGT.size > 0:
            # compute overlaps
            # intersection
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih

            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                   (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)

            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)

        if ovmax > ovthresh:
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)

    return rec, prec, ap


def do_python_eval(devkit_path, year, image_set, classes, ovthresh, output_dir='results'):
    annopath = os.path.join(devkit_path, 'VOC' + year, 'Annotations', '{:s}.xml')
    imagesetfile = os.path.join(devkit_path, 'VOC' + year, 'ImageSets', 'Main', image_set + '.txt')
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    # use_07_metric = True if int(year) < 2010 else False
    use_07_metric = False
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    # i - index, cls - category name
    for i, cls in enumerate(classes):
        if cls == '__background__':
            continue
        filename = get_voc_results_file_template(image_set).format(cls)
        rec, prec, ap = voc_eval(filename, annopath, imagesetfile, cls, cachedir, ovthresh,
                                 use_07_metric=use_07_metric)
        aps += [ap]
        print('AP for {} = {:.4f}'.format(cls, ap))
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print('Mean AP = {:.4f}'.format(np.mean(aps)))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(np.mean(aps)))
    print('~~~~~~~~')


if __name__ == '__main__':
    args = parse_args()

    output_dir = os.path.abspath(args.output_dir[0])
    with open(args.class_file, 'r') as f:
        lines = f.readlines()
    classes = [t.strip('\n') for t in lines]

    print('Evaluating detections')
    do_python_eval(args.voc_dir, args.year, args.image_set, classes, args.ovthresh, output_dir)
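If you only want to evaluate one class without going through the command-line interface, voc_eval can also be called directly. The sketch below is illustrative only: it assumes the listing above has been saved as reval_voc.py, that the per-class detection file results/car.txt exists with one detection per line in the format the code parses (image_id confidence xmin ymin xmax ymax), and that a VOC2007-style devkit layout is available; the file names and paths here are assumptions, not part of the original script.

# A minimal sketch of calling voc_eval for a single class.
# Module name, class name and all paths below are assumptions for illustration.
from reval_voc import voc_eval

rec, prec, ap = voc_eval(
    detpath='results/{:s}.txt',                          # per-class detection files
    annopath='VOCdevkit/VOC2007/Annotations/{:s}.xml',   # per-image XML annotations
    imagesetfile='VOCdevkit/VOC2007/ImageSets/Main/test.txt',
    classname='car',
    cachedir='VOCdevkit/annotations_cache',
    ovthresh=0.5,
    use_07_metric=False)
print('AP for car = {:.4f}'.format(ap))

Running the full script instead evaluates every class listed in the --classes file, writes a {class}_pr.pkl file per class into the output directory, and prints the mAP at the end.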

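To see how the two AP definitions inside voc_ap differ, here is a minimal toy check, again assuming the listing is saved as reval_voc.py (the module name is an assumption). It builds the precision/recall arrays produced by four ranked detections (TP, FP, TP, FP) against two ground-truth boxes and evaluates both metrics.

import numpy as np
from reval_voc import voc_ap  # module name is an assumption

# Cumulative TP = [1, 1, 2, 2], FP = [0, 1, 1, 2], with npos = 2 ground-truth boxes
rec = np.array([0.5, 0.5, 1.0, 1.0])           # recall = TP / npos
prec = np.array([1.0, 0.5, 2.0 / 3.0, 0.5])    # precision = TP / (TP + FP)

print(voc_ap(rec, prec, use_07_metric=True))   # 11-point interpolation, roughly 0.85
print(voc_ap(rec, prec, use_07_metric=False))  # area under the interpolated PR curve, roughly 0.83

The 11-point value comes out slightly higher here because it averages the interpolated precision at fixed recall thresholds, while the post-2010 metric integrates the precision envelope over the actual recall intervals.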