跑了一晚上的模型,实在占GPU资源,这两天已经有很多小朋友说我了。我选择了其中一个参数。

https://github.com/dmlc/gluon-cv/blob/master/scripts/detection/faster_rcnn/train_faster_rcnn.py

train_faster_rcnn的修改之前就弄好了,这里贴一个完整的。

"""Train Faster-RCNN end to end."""
import argparse
import os
# disable autotune
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
import logging
import time
import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet import gluon
from mxnet import autograd
import gluoncv as gcv
from gluoncv import data as gdata
from gluoncv import utils as gutils
from gluoncv.model_zoo import get_model
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultTrainTransform
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultValTransform
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
from gluoncv.utils.metrics.accuracy import Accuracy# add_lst
from gluoncv.data import LstDetection


def parse_args():
    """Parse command-line arguments and fill in per-dataset defaults.

    String-typed numeric options (--epochs, --lr, --lr-warmup, --wd) use the
    empty string to mean "take the dataset-specific default below".
    """
    parser = argparse.ArgumentParser(description='Train Faster-RCNN networks e2e.')
    parser.add_argument('--network', type=str, default='resnet50_v1b',
                        help="Base network name which serves as feature extraction base.")
    parser.add_argument('--dataset', type=str, default='voc',
                        help='Training dataset. Now support voc and coco.')
    parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,
                        default=4, help='Number of data workers, you can use larger '
                        'number to accelerate data loading, if you CPU and GPUs are powerful.')
    parser.add_argument('--gpus', type=str, default='0',
                        help='Training with GPUs, you can specify 1,3 for example.')
    parser.add_argument('--epochs', type=str, default='',
                        help='Training epochs.')
    parser.add_argument('--resume', type=str, default='',
                        help='Resume from previously saved parameters if not None. '
                        'For example, you can resume from ./faster_rcnn_xxx_0123.params')
    parser.add_argument('--start-epoch', type=int, default=0,
                        help='Starting epoch for resuming, default is 0 for new training.'
                        'You can specify it to 100 for example to start from 100 epoch.')
    parser.add_argument('--lr', type=str, default='',
                        help='Learning rate, default is 0.001 for voc single gpu training.')
    parser.add_argument('--lr-decay', type=float, default=0.1,
                        help='decay rate of learning rate. default is 0.1.')
    parser.add_argument('--lr-decay-epoch', type=str, default='',
                        help='epoches at which learning rate decays. default is 14,20 for voc.')
    parser.add_argument('--lr-warmup', type=str, default='',
                        help='warmup iterations to adjust learning rate, default is 0 for voc.')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='SGD momentum, default is 0.9')
    parser.add_argument('--wd', type=str, default='',
                        help='Weight decay, default is 5e-4 for voc')
    parser.add_argument('--log-interval', type=int, default=100,
                        help='Logging mini-batch interval. Default is 100.')
    parser.add_argument('--save-prefix', type=str, default='',
                        help='Saving parameter prefix')
    parser.add_argument('--save-interval', type=int, default=1,
                        help='Saving parameters epoch interval, best model will always be saved.')
    parser.add_argument('--val-interval', type=int, default=1,
                        help='Epoch interval for validation, increase the number will reduce the '
                        'training time if validation is slow.')
    parser.add_argument('--seed', type=int, default=233,
                        help='Random seed to be fixed.')
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        help='Print helpful debugging info once set.')
    parser.add_argument('--mixup', action='store_true', help='Use mixup training.')
    parser.add_argument('--no-mixup-epochs', type=int, default=20,
                        help='Disable mixup training if enabled in the last N epochs.')
    args = parser.parse_args()

    # The custom 'pedestrian' lst dataset reuses the voc hyper-parameters.
    if args.dataset == 'voc' or args.dataset == 'pedestrian':
        args.epochs = int(args.epochs) if args.epochs else 20
        args.lr_decay_epoch = args.lr_decay_epoch if args.lr_decay_epoch else '14,20'
        args.lr = float(args.lr) if args.lr else 0.001
        args.lr_warmup = args.lr_warmup if args.lr_warmup else -1
        args.wd = float(args.wd) if args.wd else 5e-4
    elif args.dataset == 'coco':
        args.epochs = int(args.epochs) if args.epochs else 26
        args.lr_decay_epoch = args.lr_decay_epoch if args.lr_decay_epoch else '17,23'
        args.lr = float(args.lr) if args.lr else 0.00125
        args.lr_warmup = args.lr_warmup if args.lr_warmup else 8000
        args.wd = float(args.wd) if args.wd else 1e-4

    num_gpus = len(args.gpus.split(','))
    if num_gpus == 1:
        # No warmup needed for single-device training.
        args.lr_warmup = -1
    else:
        # Linear scaling rule: scale lr with device count, shrink warmup.
        args.lr *= num_gpus
        args.lr_warmup /= num_gpus
    return args


class RPNAccMetric(mx.metric.EvalMetric):
    """Classification accuracy of the RPN objectness head."""

    def __init__(self):
        super(RPNAccMetric, self).__init__('RPNAcc')

    def update(self, labels, preds):
        # label: [rpn_label, rpn_weight]
        # preds: [rpn_cls_logits]
        rpn_label, rpn_weight = labels
        rpn_cls_logits = preds[0]
        # calculate num_inst (average on those fg anchors)
        num_inst = mx.nd.sum(rpn_weight)
        # The RPN head is binary, so threshold the sigmoid output at 0.5
        # rather than taking an argmax over classes:
        # pred_label = mx.nd.argmax(rpn_cls_logits, axis=1, keepdims=True)
        pred_label = mx.nd.sigmoid(rpn_cls_logits) >= 0.5
        # label (b, 1, h, w); only anchors with nonzero weight count.
        num_acc = mx.nd.sum((pred_label == rpn_label) * rpn_weight)
        self.sum_metric += num_acc.asscalar()
        self.num_inst += num_inst.asscalar()


class RPNL1LossMetric(mx.metric.EvalMetric):
    """Smooth-L1 regression loss of the RPN box head."""

    def __init__(self):
        super(RPNL1LossMetric, self).__init__('RPNL1Loss')

    def update(self, labels, preds):
        # label = [rpn_bbox_target, rpn_bbox_weight]
        # pred = [rpn_bbox_reg]
        rpn_bbox_target, rpn_bbox_weight = labels
        rpn_bbox_reg = preds[0]
        # calculate num_inst (average on those fg anchors; 4 coords each)
        num_inst = mx.nd.sum(rpn_bbox_weight) / 4
        # calculate smooth_l1
        loss = mx.nd.sum(rpn_bbox_weight * mx.nd.smooth_l1(rpn_bbox_reg - rpn_bbox_target, scalar=3))
        self.sum_metric += loss.asscalar()
        self.num_inst += num_inst.asscalar()


class RCNNAccMetric(mx.metric.EvalMetric):
    """Classification accuracy of the RCNN head."""

    def __init__(self):
        super(RCNNAccMetric, self).__init__('RCNNAcc')

    def update(self, labels, preds):
        # label = [rcnn_label]
        # pred = [rcnn_cls]
        rcnn_label = labels[0]
        rcnn_cls = preds[0]
        # calculate num_acc
        pred_label = mx.nd.argmax(rcnn_cls, axis=-1)
        num_acc = mx.nd.sum(pred_label == rcnn_label)
        self.sum_metric += num_acc.asscalar()
        self.num_inst += rcnn_label.size


class RCNNL1LossMetric(mx.metric.EvalMetric):
    """Smooth-L1 regression loss of the RCNN box head."""

    def __init__(self):
        super(RCNNL1LossMetric, self).__init__('RCNNL1Loss')

    def update(self, labels, preds):
        # label = [rcnn_bbox_target, rcnn_bbox_weight]
        # pred = [rcnn_reg]
        rcnn_bbox_target, rcnn_bbox_weight = labels
        rcnn_bbox_reg = preds[0]
        # calculate num_inst (4 coords per positive sample)
        num_inst = mx.nd.sum(rcnn_bbox_weight) / 4
        # calculate smooth_l1
        loss = mx.nd.sum(rcnn_bbox_weight * mx.nd.smooth_l1(rcnn_bbox_reg - rcnn_bbox_target, scalar=1))
        self.sum_metric += loss.asscalar()
        self.num_inst += num_inst.asscalar()


def get_dataset(dataset, args):
    """Return (train_dataset, val_dataset, val_metric) for the given dataset name.

    Raises NotImplementedError for unknown dataset names.
    """
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    elif dataset.lower() == 'pedestrian':
        # Custom single-class dataset described by .lst files in the cwd.
        # Keep the sanity-check prints from the original script.
        lst_dataset = LstDetection('train_val.lst', root=os.path.expanduser('.'))
        print(len(lst_dataset))
        first_img = lst_dataset[0][0]
        print(first_img.shape)
        print(lst_dataset[0][1])
        train_dataset = LstDetection('train.lst', root=os.path.expanduser('.'))
        val_dataset = LstDetection('val.lst', root=os.path.expanduser('.'))
        class_names = ('pedestrian',)
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=class_names)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric


def get_dataloader(net, train_dataset, val_dataset, batch_size, num_workers):
    """Get dataloader."""
    train_bfn = batchify.Tuple(*[batchify.Append() for _ in range(5)])
    train_loader = mx.gluon.data.DataLoader(
        train_dataset.transform(FasterRCNNDefaultTrainTransform(net.short, net.max_size, net)),
        batch_size, True, batchify_fn=train_bfn, last_batch='rollover', num_workers=num_workers)
    val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(3)])
    val_loader = mx.gluon.data.DataLoader(
        val_dataset.transform(FasterRCNNDefaultValTransform(net.short, net.max_size)),
        batch_size, False, batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)
    return train_loader, val_loader


def save_params(net, logger, best_map, current_map, epoch, save_interval, prefix):
    """Save checkpoints; best_map is a 1-element list mutated in place."""
    current_map = float(current_map)
    if current_map > best_map[0]:
        logger.info('[Epoch {}] mAP {} higher than current best {} saving to {}'.format(
            epoch, current_map, best_map, '{:s}_best.params'.format(prefix)))
        best_map[0] = current_map
        net.save_parameters('{:s}_best.params'.format(prefix))
        with open(prefix + '_best_map.log', 'a') as f:
            f.write('{:04d}:\t{:.4f}\n'.format(epoch, current_map))
    if save_interval and (epoch + 1) % save_interval == 0:
        logger.info('[Epoch {}] Saving parameters to {}'.format(
            epoch, '{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map)))
        net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map))


def split_and_load(batch, ctx_list):
    """Split data to 1 batch each device."""
    # (removed unused num_ctx/enumerate-index locals from the original)
    new_batch = []
    for data in batch:
        new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]
        new_batch.append(new_data)
    return new_batch


def validate(net, val_data, ctx, eval_metric):
    """Test on validation dataset."""
    clipper = gcv.nn.bbox.BBoxClipToImage()
    eval_metric.reset()
    net.hybridize(static_alloc=True)
    for batch in val_data:
        batch = split_and_load(batch, ctx_list=ctx)
        det_bboxes = []
        det_ids = []
        det_scores = []
        gt_bboxes = []
        gt_ids = []
        gt_difficults = []
        for x, y, im_scale in zip(*batch):
            # get prediction results
            ids, scores, bboxes = net(x)
            det_ids.append(ids)
            det_scores.append(scores)
            # clip to image size
            det_bboxes.append(clipper(bboxes, x))
            # rescale to original resolution
            im_scale = im_scale.reshape((-1)).asscalar()
            det_bboxes[-1] *= im_scale
            # split ground truths
            gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
            gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
            gt_bboxes[-1] *= im_scale
            gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)
        # update metric
        for det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff in zip(
                det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):
            eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
    return eval_metric.get()


def get_lr_at_iter(alpha):
    """Linear warmup factor: 1/3 at alpha=0 rising to 1 at alpha=1."""
    return 1. / 3. * (1 - alpha) + alpha


def train(net, train_data, val_data, eval_metric, ctx, args):
    """Training pipeline"""
    net.collect_params().setattr('grad_req', 'null')
    net.collect_train_params().setattr('grad_req', 'write')
    trainer = gluon.Trainer(
        net.collect_train_params(),  # fix batchnorm, fix first stage, etc...
        'sgd',
        {'learning_rate': args.lr,
         'wd': args.wd,
         'momentum': args.momentum,
         'clip_gradient': 5})

    # lr decay policy
    lr_decay = float(args.lr_decay)
    lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
    lr_warmup = float(args.lr_warmup)  # avoid int division

    rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
    rpn_box_loss = mx.gluon.loss.HuberLoss(rho=1 / 9.)  # == smoothl1
    rcnn_cls_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
    rcnn_box_loss = mx.gluon.loss.HuberLoss()  # == smoothl1
    metrics = [mx.metric.Loss('RPN_Conf'),
               mx.metric.Loss('RPN_SmoothL1'),
               mx.metric.Loss('RCNN_CrossEntropy'),
               mx.metric.Loss('RCNN_SmoothL1'), ]

    rpn_acc_metric = RPNAccMetric()
    rpn_bbox_metric = RPNL1LossMetric()
    rcnn_acc_metric = RCNNAccMetric()
    rcnn_bbox_metric = RCNNL1LossMetric()
    metrics2 = [rpn_acc_metric, rpn_bbox_metric, rcnn_acc_metric, rcnn_bbox_metric]

    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_file_path = args.save_prefix + '_train.log'
    log_dir = os.path.dirname(log_file_path)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    fh = logging.FileHandler(log_file_path)
    logger.addHandler(fh)
    logger.info(args)
    if args.verbose:
        logger.info('Trainable parameters:')
        logger.info(net.collect_train_params().keys())
    logger.info('Start training from [Epoch {}]'.format(args.start_epoch))

    best_map = [0]
    for epoch in range(args.start_epoch, args.epochs):
        mix_ratio = 1.0
        if args.mixup:
            # TODO(zhreshold) only support evenly mixup now, target generator
            # needs to be modified otherwise.
            train_data._dataset.set_mixup(np.random.uniform, 0.5, 0.5)
            mix_ratio = 0.5
            if epoch >= args.epochs - args.no_mixup_epochs:
                train_data._dataset.set_mixup(None)
                mix_ratio = 1.0
        while lr_steps and epoch >= lr_steps[0]:
            new_lr = trainer.learning_rate * lr_decay
            lr_steps.pop(0)
            trainer.set_learning_rate(new_lr)
            logger.info("[Epoch {}] Set learning rate to {}".format(epoch, new_lr))
        for metric in metrics:
            metric.reset()
        tic = time.time()
        btic = time.time()
        net.hybridize(static_alloc=True)
        base_lr = trainer.learning_rate
        for i, batch in enumerate(train_data):
            if epoch == 0 and i <= lr_warmup:
                # adjust based on real percentage
                new_lr = base_lr * get_lr_at_iter(i / lr_warmup)
                if new_lr != trainer.learning_rate:
                    if i % args.log_interval == 0:
                        logger.info('[Epoch 0 Iteration {}] Set learning rate to {}'.format(i, new_lr))
                    trainer.set_learning_rate(new_lr)
            batch = split_and_load(batch, ctx_list=ctx)
            batch_size = len(batch[0])
            losses = []
            metric_losses = [[] for _ in metrics]
            add_losses = [[] for _ in metrics2]
            with autograd.record():
                for data, label, rpn_cls_targets, rpn_box_targets, rpn_box_masks in zip(*batch):
                    gt_label = label[:, :, 4:5]
                    gt_box = label[:, :, :4]
                    cls_pred, box_pred, roi, samples, matches, rpn_score, rpn_box, anchors = net(data, gt_box)
                    # losses of rpn
                    rpn_score = rpn_score.squeeze(axis=-1)
                    num_rpn_pos = (rpn_cls_targets >= 0).sum()
                    rpn_loss1 = rpn_cls_loss(rpn_score, rpn_cls_targets,
                                             rpn_cls_targets >= 0) * rpn_cls_targets.size / num_rpn_pos
                    rpn_loss2 = rpn_box_loss(rpn_box, rpn_box_targets,
                                             rpn_box_masks) * rpn_box.size / num_rpn_pos
                    # rpn overall loss, use sum rather than average
                    rpn_loss = rpn_loss1 + rpn_loss2
                    # generate targets for rcnn
                    cls_targets, box_targets, box_masks = net.target_generator(
                        roi, samples, matches, gt_label, gt_box)
                    # losses of rcnn
                    num_rcnn_pos = (cls_targets >= 0).sum()
                    rcnn_loss1 = rcnn_cls_loss(cls_pred, cls_targets,
                                               cls_targets >= 0) * cls_targets.size / cls_targets.shape[0] / num_rcnn_pos
                    rcnn_loss2 = rcnn_box_loss(box_pred, box_targets,
                                               box_masks) * box_pred.size / box_pred.shape[0] / num_rcnn_pos
                    rcnn_loss = rcnn_loss1 + rcnn_loss2
                    # overall losses
                    losses.append(rpn_loss.sum() * mix_ratio + rcnn_loss.sum() * mix_ratio)
                    metric_losses[0].append(rpn_loss1.sum() * mix_ratio)
                    metric_losses[1].append(rpn_loss2.sum() * mix_ratio)
                    metric_losses[2].append(rcnn_loss1.sum() * mix_ratio)
                    metric_losses[3].append(rcnn_loss2.sum() * mix_ratio)
                    add_losses[0].append([[rpn_cls_targets, rpn_cls_targets >= 0], [rpn_score]])
                    add_losses[1].append([[rpn_box_targets, rpn_box_masks], [rpn_box]])
                    add_losses[2].append([[cls_targets], [cls_pred]])
                    add_losses[3].append([[box_targets, box_masks], [box_pred]])
                autograd.backward(losses)
                for metric, record in zip(metrics, metric_losses):
                    metric.update(0, record)
                for metric, records in zip(metrics2, add_losses):
                    for pred in records:
                        metric.update(pred[0], pred[1])
            trainer.step(batch_size)
            # update metrics
            if args.log_interval and not (i + 1) % args.log_interval:
                msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics + metrics2])
                logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}'.format(
                    epoch, i, args.log_interval * batch_size / (time.time() - btic), msg))
                btic = time.time()

        msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics])
        logger.info('[Epoch {}] Training cost: {:.3f}, {}'.format(epoch, (time.time() - tic), msg))
        # Validation was disabled by the blog author; kept here for reference:
        # if not (epoch + 1) % args.val_interval:
        #     # consider reduce the frequency of validation to save time
        #     map_name, mean_ap = validate(net, val_data, ctx, eval_metric)
        #     val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])
        #     logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
        #     current_map = float(mean_ap[-1])
        # else:
        #     current_map = 0.
        current_map = 0
        save_params(net, logger, best_map, current_map, epoch, args.save_interval, args.save_prefix)


if __name__ == '__main__':
    args = parse_args()
    # fix seed for mxnet, numpy and python builtin random generator.
    gutils.random.seed(args.seed)
    # training contexts
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
    ctx = ctx if ctx else [mx.cpu()]
    args.batch_size = len(ctx)  # 1 batch per device
    # network
    net_name = '_'.join(('faster_rcnn', args.network, args.dataset))
    args.save_prefix += net_name
    net = get_model(net_name, pretrained_base=True)
    if args.resume.strip():
        net.load_parameters(args.resume.strip())
    else:
        # Initialize only the parameters that the pretrained base left empty.
        for param in net.collect_params().values():
            if param._data is not None:
                continue
            param.initialize()
    net.collect_params().reset_ctx(ctx)
    # training data
    train_dataset, val_dataset, eval_metric = get_dataset(args.dataset, args)
    train_data, val_data = get_dataloader(
        net, train_dataset, val_dataset, args.batch_size, args.num_workers)
    # training
    train(net, train_data, val_data, eval_metric, ctx, args)

View Code

检测部分是在官方 demo 脚本的基础上修改的:补充了几个参数,可以用 lst 文件遍历待测图片,并改用 cv2 画图,不再依赖 matplotlib。

"""Faster RCNN Demo script."""
import os
import argparse
import mxnet as mx
import gluoncv as gcv
from gluoncv.data.transforms import presets
from matplotlib import pyplot as plt
import cv2

font = cv2.FONT_HERSHEY_SIMPLEX


def parse_args():
    """Parse command-line arguments for the detection demo."""
    parser = argparse.ArgumentParser(description='Test with Faster RCNN networks.')
    parser.add_argument('--network', type=str, default='faster_rcnn_resnet50_v1b_coco',
                        help="Faster RCNN full network name")
    parser.add_argument('--images', type=str, default='',
                        help='Test images, use comma to split multiple.')
    parser.add_argument('--gpus', type=str, default='',
                        help='Training with GPUs, you can specify 1,3 for example.')
    parser.add_argument('--pretrained', type=str, default='True',
                        help='Load weights from previously saved parameters. You can specify parameter file name.')
    parser.add_argument('--thresh', type=float, default=0.5,
                        help='Threshold of object score when visualize the bboxes.')
    # add_lst
    parser.add_argument('--lst', type=str, default='',
                        help="predict's lst file")
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    # context list
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
    ctx = [mx.cpu()] if not ctx else ctx

    # grab some image if not specified
    if not args.images.strip() and args.lst == '':
        gcv.utils.download('https://github.com/dmlc/web-data/blob/master/' +
                           'gluoncv/detection/biking.jpg?raw=true', 'biking.jpg')
        image_list = ['biking.jpg']
    else:
        image_list = [x.strip() for x in args.images.split(',') if x.strip()]

    cnt = 0
    if args.lst != '':
        print(args.lst)
        # BUG FIX: the original opened a hard-coded 'val_front_0913.lst'
        # and ignored the file passed via --lst.
        # NOTE(review): image names in the lst are assumed relative to this
        # hard-coded root -- adjust it for your own dataset.
        root = '/mnt/hdfs-data-4/data/jian.yin/val_front_0913/'
        image_list = []
        with open(args.lst) as lst_file:
            for line in lst_file:
                fields = line.split('\t')
                path = root + fields[-1][:-1]  # [:-1] strips the trailing newline
                print(path)
                image_list.append(path)
                cnt += 1
        print('sum of pic ', cnt)

    if args.pretrained.lower() in ['true', '1', 'yes', 't']:
        net = gcv.model_zoo.get_model(args.network, pretrained=True)
    else:
        net = gcv.model_zoo.get_model(args.network, pretrained=False, pretrained_base=False)
        net.load_parameters(args.pretrained)
    net.set_nms(0.3, 200)
    net.collect_params().reset_ctx(ctx=ctx)

    # Write every image's score/bbox record to draw/plt.txt so they can be
    # re-drawn later (see plt.py) without re-running the model.
    if not os.path.exists('draw'):
        os.makedirs('draw')
    fw = open('draw/plt.txt', 'w')
    cnt1 = 0
    for image in image_list:
        record = {'url': image}
        bbox_list = []
        x, img = presets.rcnn.load_test(image, short=net.short, max_size=net.max_size)
        img_h = img.shape[0]
        img_w = img.shape[1]
        x = x.as_in_context(ctx[0])
        ids, scores, bboxes = [xx[0].asnumpy() for xx in net(x)]
        original_img = cv2.imread(image)
        original_img_h = original_img.shape[0]
        original_img_w = original_img.shape[1]
        for i in range(scores.shape[0]):
            if scores[i] > args.thresh:
                # BUG FIX: x coordinates must be scaled by the width ratio and
                # y coordinates by the height ratio (the original swapped them;
                # it only worked because load_test preserves the aspect ratio).
                x1 = int(bboxes[i][0] * original_img_w / img_w)
                y1 = int(bboxes[i][1] * original_img_h / img_h)
                x2 = int(bboxes[i][2] * original_img_w / img_w)
                y2 = int(bboxes[i][3] * original_img_h / img_h)
                bbox_list.append((float(scores[i]), x1, y1, x2, y2))
        record['bbox'] = bbox_list
        fw.write(str(record) + '\n')
        cnt1 += 1
        print('The last ', cnt - cnt1)
    fw.close()

把每张图的得分和锚框位置都写进了文件,之后想怎么处理都可以,不用每次重新跑模型。下面是重绘脚本 plt.py:

import cv2
import os
import ast

font = cv2.FONT_HERSHEY_SIMPLEX

# Each line of plt.txt is a Python dict literal written by the demo script:
# {'url': <image path>, 'bbox': [(score, x1, y1, x2, y2), ...]}
# literal_eval only parses literals, unlike the original eval().
with open('plt.txt') as f:
    records = [ast.literal_eval(line) for line in f]

# BUG FIX: the remaining count was hard-coded to 6137; derive it instead.
total = len(records)
for cnt, record in enumerate(records, start=1):
    url = record['url']
    bbox = record['bbox']
    img = cv2.imread(url)
    for score, x1, y1, x2, y2 in bbox:
        label = 'person ' + ('%.2f' % score)
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 3)
        cv2.putText(img, label, (x1, y1), font, 0.5, (255, 0, 0), 2)
    # NOTE(review): assumes absolute source paths with at least 9 '/'-separated
    # components; components 5..8 are mirrored under the current directory.
    parts = url.split('/')
    out_path = '/'.join(parts[5:9])
    out_dir = os.path.dirname(out_path)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    cv2.imwrite(out_path, img)
    print('The last ', total - cnt)

转载于:https://www.cnblogs.com/TreeDream/p/10180651.html

gluoncv 训练自己的数据集,进行目标检测相关推荐

  1. 【Detectron2】使用 Detectron2 训练基于 coco 数据集的目标检测网络

    文章目录 一.安装 Detectron2 二.软连接 coco 数据集 三.训练 四.数据集相关参数 五.输出结果路径 六.COCO 数据集简介 七.模型相关参数 八.可视化结果 一.安装 Detec ...

  2. YOLOF训练自己的数据集(目标检测,cvpods版本)

    训练准备: github repo地址:https://github.com/megvii-model/YOLOF github上有两个版本,一个是cvpods,一个是detectron2,第二个我每 ...

  3. [数据集][VOC][目标检测]河道垃圾水面漂浮物数据集目标检测可用yolo训练-1304张介绍

    数据集格式:Pascal VOC格式(不包含分割路径的txt文件和yolo格式的txt文件,仅仅包含jpg图片和对应的xml) 图片数量(jpg文件个数):1304 标注数量(xml文件个数):130 ...

  4. Object Detection with Discriminatively Trained Part Based Models(使用判别训练的部件模型进行目标检测 )

    目录(?)[-] 摘要 关键词 引言 相关研究工作 模型 1 可变形部件模型 2 匹配 3 混合模型 隐藏变量SVM 1 半凸规划 2 最优化 3 随机梯度下降 4 难例挖掘SVM版 5 难例挖掘LS ...

  5. YOLOv7训练自己的数据集(口罩检测)

    YOLOv7训练自己的数据集(口罩检测) 前言 前提条件 实验环境 项目结构 制作自己的数据集 数据集目录结构 训练自己的数据集 VOC格式数据集转换成YOLO格式数据集 修改cfg配置 新建一个my ...

  6. 电气领域相关数据集(目标检测,分类图像数据及负荷预测),电气设备红外测温图像,输电线路图像数据续

    另外一部分见:电气领域相关数据集(目标检测,分类图像数据及负荷预测),输电线路图像数据 1. 变电站烟火检测图像数据集(3600多张,VOC标签) 2. 导线破损检测图像数据集(有拼接增强,VOC标签 ...

  7. TOLOv5训练自己的数据集--漫画人物检测

    TOLOv5训练自己的数据集--漫画人物检测 TOLOv5训练自己的数据集 YOLOv5模型下载 使用LabelImage标注图片 生成所需数据集 更改配置文件 训练 测试 TOLOv5训练自己的数据 ...

  8. KITTI数据集3D目标检测数据下载并可视化简洁实用版

    KITTI数据集3D目标检测部分下载使用简洁实用版 1.下载数据 使用Left Image和Velodyne点云数据 下载地址:http://www.cvlibs.net/datasets/kitti ...

  9. OpenCvSharp (C# OpenCV) DNN模块加载自己训练的TensorFlow模型做目标检测(含手势识别、骰子识别、菜品识别)(附源码)

    本文作者Color Space,文章未经作者允许禁止转载! 本文将介绍OpenCVSharp DNN模块加载自己训练的TensorFlow模型做目标检测(含手势识别.骰子识别.菜品识别)! 前言: 下 ...

  10. tensorflow精进之路(二十五)——Object Detection API目标检测(下)(VOC数据集训练自己的模型进行目标检测)

    1.概述 上一讲,我们使用了别人根据COCO数据集训练好的模型来做目标检测,这一讲,我们就来训练自己的模型. 2.下载数据集 为了方便学习,我们先使用别人整理好的数据集来训练---VOC 2012数据 ...

最新文章

  1. NetworkX玩一下 --update@2017.06.28
  2. node升级命令_Laravel Mix 4升级说明与“排坑儿”指南
  3. php7.2 的好处,PHP 7.2 中弃用的功能
  4. 北大OJ百练——4073:最长公共字符串后缀(C语言)
  5. python训练营微信公众号真实性_用python进行微信公众号开发(仅测试学习)
  6. 使用vbs脚本检查网站是否使用asp.net
  7. EF 6 code first - 从SQL server迁移到MySQL
  8. 5中bug vue_苹果官网出BUG!这些都只要一两百元
  9. Editplus 的配色方案
  10. win7中jar包不能安装的问题
  11. Extjs6(六)——增删查改之查询
  12. KITTI数据集(全)百度网盘下载地址
  13. 南方CASS工程应用--道路断面土方计算实例教程
  14. 语音识别/合成开源项目
  15. 2015年第四届C/C++ A组蓝桥杯省赛真题
  16. 联通大数据应用及沃云平台支撑能力
  17. 【深度学习】研究者意外发现DALL-E 2在用自创语言生成图像:全文黑话,人类都看不懂...
  18. 蓝旭前端预习5之DOM(文档对象模型)
  19. 滚球法(Ball Pivoting)三维表面重建论文笔记
  20. 手把手带你写一个中断输入设备驱动~

热门文章

  1. 如何在浏览器上跑深度学习模型?并且一行JS代码都不用写
  2. 盘点 | 假期里你错过的人工智能重要新闻有这些
  3. 如何优雅地用TensorFlow预测时间序列:TFTS库详细教程
  4. 聊一聊 软件系统中的“热力学第二定律”
  5. 漫画:7 种编程语言的学习曲线
  6. 想入门图深度学习?这篇55页的教程帮你理清楚了脉络
  7. 自学机器学习课程怕踩雷?有人帮你选出了top 5优质课
  8. 何恺明团队新作:只用普通ViT,不做分层设计也能搞定目标检测
  9. NumPy迎来重大版本更新
  10. 奥巴马竟被「去马赛克 AI」洗白,CVPR 新研究惹上种族歧视大麻烦,LeCun 也被卷入其中...