#1 makeTxt.py (automatically split the training data into train, validation and test sets)

#2 voc_label.py (convert a VOC-format dataset into a YOLO-format dataset)

#3 tube.yaml

#4 yolov5s.yaml

#5 train.py

#6 detect.py

#7 Video.py (every 3 seconds' worth of frames, capture a photo, run YOLOv5 detection and save the result)

#1 makeTxt.py (automatically split the training data into train, validation and test sets)

import os
import random

trainval_percent = 0.9  # fraction of the data used for train + val (the rest is test)
train_percent = 0.9     # fraction of trainval used for train (the rest is val)
xmlfilepath = 'data/Annotations'
txtsavepath = 'data/ImageSet'
if not os.path.exists(txtsavepath):
    os.makedirs(txtsavepath)
total_xml = os.listdir(xmlfilepath)

num = len(total_xml)
indices = range(num)
tv = int(num * trainval_percent)  # number of train + val samples
tr = int(tv * train_percent)      # number of train samples
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)

ftrainval = open('data/ImageSet/trainval.txt', 'w')
ftest = open('data/ImageSet/test.txt', 'w')
ftrain = open('data/ImageSet/train.txt', 'w')
fval = open('data/ImageSet/val.txt', 'w')

for i in indices:
    name = total_xml[i][:-4] + '\n'  # strip the '.xml' extension
    if i in trainval:
        ftrainval.write(name)
        if i in train:
            ftrain.write(name)
        else:
            fval.write(name)
    else:
        ftest.write(name)

ftrainval.close()
ftrain.close()
fval.close()
ftest.close()
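With the ratios above, the script produces roughly an 81/9/10 split. For example, with 1000 annotation files, tv = int(1000 × 0.9) = 900 and tr = int(900 × 0.9) = 810, so 810 names go to train.txt, 90 to val.txt and the remaining 100 to test.txt.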

#2 voc_label.py (convert a VOC-format dataset into a YOLO-format dataset)

# XML parser
import xml.etree.ElementTree as ET
import pickle
import os
# os.listdir() returns a list with the names of the files and folders in the given directory
from os import listdir, getcwd
from os.path import join

sets = ['train', 'test', 'val']
classes = ['01', '02', '03', '04', '05', '06', '07', '08', '09']

# Normalize the coordinates
def convert(size, box):  # size: (image w, image h), box: (xmin, xmax, ymin, ymax)
    dw = 1. / size[0]  # 1/w
    dh = 1. / size[1]  # 1/h
    x = (box[0] + box[1]) / 2.0  # x coordinate of the object's center in the image
    y = (box[2] + box[3]) / 2.0  # y coordinate of the object's center in the image
    w = box[1] - box[0]  # object width in pixels
    h = box[3] - box[2]  # object height in pixels
    x = x * dw  # center x as a fraction of the image width (x / w)
    w = w * dw  # width as a fraction of the image width
    y = y * dh  # center y as a fraction of the image height (y / h)
    h = h * dh  # height as a fraction of the image height
    return (x, y, w, h)  # center x ratio, center y ratio, width ratio, height ratio, all in [0, 1]

def convert_annotation(image_id):
    '''
    Convert the XML file with the given file name into a label file. The XML file contains the
    bounding boxes and the image size, among other things; we parse it, normalize the values and
    write them into the label file. One image corresponds to one XML file, and after parsing and
    normalizing, the information ends up in exactly one label file with the format: class x y w h.
    An image may contain several objects, so a label file can hold several box lines.
    '''
    # Open the XML file that matches this image_id; it holds the bounding boxes
    in_file = open('data/Annotations/%s.xml' % (image_id), encoding='utf-8')
    # Prepare the label file for this image_id, one line per box:
    # <object-class> <x> <y> <width> <height>
    out_file = open('data/labels/%s.txt' % (image_id), 'w', encoding='utf-8')
    # Parse the XML file
    tree = ET.parse(in_file)
    # Get the root element
    root = tree.getroot()
    # Get the image size
    size = root.find('size')
    # Guard against XML files whose size tag is empty
    if size != None:
        # width
        w = int(size.find('width').text)
        # height
        h = int(size.find('height').text)
        # Iterate over the object elements
        for obj in root.iter('object'):
            # difficult flag
            difficult = obj.find('difficult').text
            # class name (string)
            cls = obj.find('name').text
            # Skip classes that are not in our predefined class list, or boxes with difficult == 1
            if cls not in classes or int(difficult) == 1:
                continue
            # Look up the class id from the class name
            cls_id = classes.index(cls)
            # Find the bndbox element
            xmlbox = obj.find('bndbox')
            # Read the bndbox values as (xmin, xmax, ymin, ymax)
            b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                 float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
            print(image_id, cls, b)
            # Normalize: w = width, h = height, b = (xmin, xmax, ymin, ymax)
            bb = convert((w, h), b)
            # bb is the normalized (x, y, w, h)
            # Write "class x y w h" into the label file
            out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')

# Current working directory
wd = getcwd()
print(wd)

for image_set in sets:
    '''
    Iterate over all dataset splits. Two things happen here:
    1. Every image's full path is written into the split's txt file, so images can be located later.
    2. Every image's XML file is parsed and converted, writing its bounding boxes and class
       information into a label file; afterwards, reading that file directly yields the labels.
    '''
    # Create the labels folder if it does not exist yet
    if not os.path.exists('data/labels/'):
        os.makedirs('data/labels/')
    # Read the contents of the train/test/val lists produced in data/ImageSet; they hold the file names
    image_ids = open('data/ImageSet/%s.txt' % (image_set)).read().strip().split()
    # Open the split's txt file (e.g. data/train.txt) for writing
    list_file = open('data/%s.txt' % (image_set), 'w')
    # Write each file id with its full path, one per line, and convert its annotation
    for image_id in image_ids:
        list_file.write('data/images/%s.jpg\n' % (image_id))
        convert_annotation(image_id)
    # Close the file
    list_file.close()
# os.system('command') runs the given shell command and returns 0 if it succeeds, non-zero otherwise
# os.system("cat 2007_train.txt 2007_val.txt 2012_train.txt 2012_val.txt > train.txt")
# os.system("cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt > train.all.txt")
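A quick sanity check of convert(): for a 640×480 image with a box (xmin=100, xmax=300, ymin=50, ymax=150), the center is (200, 100) and the size is 200×100 pixels, so the label becomes x = 200/640 = 0.3125, y = 100/480 ≈ 0.2083, w = 200/640 = 0.3125, h = 100/480 ≈ 0.2083 — all in [0, 1] as YOLO expects.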

#3 tube.yaml

train: ../mydata3/train/images  # train images (relative to 'path')
val: ../mydata3/val/images  # val images (relative to 'path')

nc: 19  # number of classes
names: ['module1', '0001', '0010', '0011', '0100', '0101', '0110', '0111', '1000', '1001',
        'Open', 'Tube body', 'module2', 'Bar 1', 'Bar 2', 'Bar 3', 'module3', '1010', '1011']  # class names
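Note that names must contain exactly nc entries (19 here); train.py asserts len(names) == nc and aborts with an error otherwise.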

#4 yolov5s.yaml

# parameters
nc: 19  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple

# anchors
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 9, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 1, SPP, [1024, [5, 9, 13]]],
   [-1, 3, C3, [1024, False]],  # 9
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
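How the two multiples scale the model, as YOLOv5's model parser applies them: a block declared with number=9 is built with max(round(9 × depth_multiple), 1) = max(round(9 × 0.33), 1) = 3 repeats, and a layer declared with 1024 channels ends up with 1024 × width_multiple = 512 output channels — so yolov5s is about a third as deep and half as wide as the base configuration.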

#5 train.py

import argparse
import logging
import math
import os

os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
import random
import time
from pathlib import Path
from threading import Thread

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

import test  # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
    fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
    check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first

logger = logging.getLogger(__name__)

def train(hyp, opt, device, tb_writer=None, wandb=None):
    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']
    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        if hyp.get('anchors'):
            ckpt['model'].yaml['anchors'] = round(hyp['anchors'])  # force autoanchor
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device)  # create
        exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)
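    # For reference, one_cycle(1, hyp['lrf'], epochs) in utils.general returns the cosine ramp
    # lf(x) = ((1 - cos(x * pi / epochs)) / 2) * (hyp['lrf'] - 1) + 1,
    # a multiplier that starts at 1 at epoch 0 and decays smoothly to hyp['lrf'] by the last epoch.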

    # Logging
    if rank in [-1, 0] and wandb and wandb.run is None:
        opt.hyp = hyp  # add hyperparameters
        wandb_run = wandb.init(config=opt, resume="allow",
                               project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                               name=save_dir.stem,
                               id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
    loggers = {'wandb': wandb}  # loggers dict

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # Results
        if ckpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = int(model.stride.max())  # grid size (max stride)
    nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        ema.updates = start_epoch * nb // accumulate  # set EMA updates
        testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
                                       world_size=opt.world_size, workers=opt.workers,
                                       pad=0.5, prefix=colorstr('val: '))[0]

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
                f'Using {dataloader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.4g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    # if tb_writer:
                    #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                    #     tb_writer.add_graph(model, imgs)  # add model to tensorboard
                elif plots and ni == 10 and wandb:
                    wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')
                                           if x.exists()]}, commit=False)

            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            if ema:
                ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                results, maps, times = test.test(opt.data,
                                                 batch_size=total_batch_size,
                                                 imgsz=imgsz_test,
                                                 model=ema.ema,
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 verbose=nc < 50 and final_epoch,
                                                 plots=plots and final_epoch,
                                                 log_imgs=opt.log_imgs if wandb else 0,
                                                 compute_loss=compute_loss)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))

            # Log
            tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
                    'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                    'x/lr0', 'x/lr1', 'x/lr2']  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb:
                    wandb.log({tag: x}, step=epoch, commit=tag == tags[-1])  # W&B

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
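            # fitness() in utils.metrics is a weighted sum of the four metrics; stock YOLOv5
            # uses weights [0.0, 0.0, 0.1, 0.9] for [P, R, mAP@.5, mAP@.5:.95], so the "best"
            # checkpoint is driven almost entirely by mAP@.5:.95.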

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:  # create checkpoint
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': f.read(),
                            'model': ema.ema,
                            'optimizer': None if final_epoch else optimizer.state_dict(),
                            'wandb_id': wandb_run.id if wandb else None}

                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training

    if rank in [-1, 0]:
        # Strip optimizers
        final = best if best.exists() else last  # final model
        for f in [last, best]:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
        if opt.bucket:
            os.system(f'gsutil cp {final} gs://{opt.bucket}/weights')  # upload

        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb:
                files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
                wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
                                       if (save_dir / f).exists()]})
                if opt.log_artifacts:
                    wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem)

        # Test best.pt
        logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
            for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]):  # speed, mAP tests
                results, _, _ = test.test(opt.data,
                                          batch_size=total_batch_size,
                                          imgsz=imgsz_test,
                                          conf_thres=conf,
                                          iou_thres=iou,
                                          model=attempt_load(final, device).half(),
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          save_dir=save_dir,
                                          save_json=save_json,
                                          plots=False)

    else:
        dist.destroy_process_group()

    wandb.run.finish() if wandb and wandb.run else None
    torch.cuda.empty_cache()
    return results

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
    parser.add_argument('--data', type=str, default='data/tube.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch-size', type=int, default=8, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
    parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model')
    parser.add_argument('--workers', type=int, default=0, help='maximum number of dataloader workers')
    parser.add_argument('--project', default='runs/train', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')
    opt = parser.parse_args()

    # Set DDP variables
    opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
    set_logging(opt.global_rank)
    if opt.global_rank in [-1, 0]:
        check_git_status()
        check_requirements()

    # Resume
    if opt.resume:  # resume an interrupted run
        ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
        assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
        apriori = opt.global_rank, opt.local_rank
        with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
            opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader))  # replace
        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
        logger.info('Resuming training from %s' % ckpt)
    else:
        # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
        opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
        opt.name = 'evolve' if opt.evolve else opt.name
        opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)  # increment run

    # DDP mode
    opt.total_batch_size = opt.batch_size
    device = select_device(opt.device, batch_size=opt.batch_size)
    if opt.local_rank != -1:
        assert torch.cuda.device_count() > opt.local_rank
        torch.cuda.set_device(opt.local_rank)
        device = torch.device('cuda', opt.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
        assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
        opt.batch_size = opt.total_batch_size // opt.world_size

    # Hyperparameters
    with open(opt.hyp) as f:
        hyp = yaml.load(f, Loader=yaml.SafeLoader)  # load hyps

    # Train
    logger.info(opt)
    # try:
    #     import wandb
    # except ImportError:
    wandb = None  # W&B logging is disabled here; the wandb import above is commented out
    prefix = colorstr('wandb: ')
    logger.info(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
    if not opt.evolve:
        tb_writer = None  # init loggers
        if opt.global_rank in [-1, 0]:
            logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
            tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
        train(hyp, opt, device, tb_writer, wandb)

    # Evolve hyperparameters (optional)
    else:
        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
        meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
                'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
                'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
                'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
                'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
                'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
                'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
                'box': (1, 0.02, 0.2),  # box loss gain
                'cls': (1, 0.2, 4.0),  # cls loss gain
                'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
                'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
                'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
                'iou_t': (0, 0.1, 0.7),  # IoU training threshold
                'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
                'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
                'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
                'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
                'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
                'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
                'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
                'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
                'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
                'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
                'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
                'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
                'mixup': (1, 0.0, 1.0)}  # image mixup (probability)

        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
        opt.notest, opt.nosave = True, True  # only test/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
        if opt.bucket:
            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists

        for _ in range(300):  # generations to evolve
            if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt('evolve.txt', ndmin=2)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min()  # weights
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([x[0] for x in meta.values()])  # gains 0-1
                ng = len(meta)
                v = np.ones(ng)
                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
                    hyp[k] = float(x[i + 7] * v[i])  # mutate

            # Constrain to limits
            for k, v in meta.items():
                hyp[k] = max(hyp[k], v[1])  # lower limit
                hyp[k] = min(hyp[k], v[2])  # upper limit
                hyp[k] = round(hyp[k], 5)  # significant digits

            # Train mutation
            results = train(hyp.copy(), opt, device, wandb=wandb)

            # Write mutation results
            print_mutation(hyp.copy(), results, yaml_file, opt.bucket)

        # Plot results
        plot_evolution(yaml_file)
        print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
              f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')

#6 detect.py

"""Run inference with a YOLOv5 model on images, videos, directories, streamsUsage:$ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640"""import argparseimport sysimport timefrom pathlib import Pathimport cv2import torchimport torch.backends.cudnn as cudnnFILE = Path(__file__).absolute()sys.path.append(FILE.parents[0].as_posix())  # add yolov5/ to pathfrom models.experimental import attempt_loadfrom utils.datasets import LoadStreams, LoadImagesfrom utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_boxfrom utils.plots import colors, plot_one_boxfrom utils.torch_utils import select_device, load_classifier, time_sync@torch.no_grad()def run(weights='E:/555/555/yolov5-master/runs/train/exp24/weights/best.pt',  # model.pt path(s),E:/555/555/yolov5-master/runs/train/exp24/weights/best.pt  默认: yolov5s.ptsource='data/images',  # file/dir/URL/glob, 0 for webcamimgsz=640,  # inference size (pixels)conf_thres=0.25,  # confidence thresholdiou_thres=0.45,  # NMS IOU thresholdmax_det=1000,  # maximum detections per imagedevice='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu 默认使用GPU,选择CPU时那么会在CPU设备检测view_img=True,  # show results 显示检测的结果save_txt=False,  # save results to *.txtsave_conf=False,  # save confidences in --save-txt labelssave_crop=False,  # save cropped prediction boxesnosave=False,  # do not save images/videosclasses=None,  # filter by class: --class 0, or --class 0 2 3agnostic_nms=False,  # class-agnostic NMSaugment=False,  # augmented inferencevisualize=False,  # visualize featuresupdate=False,  # update all modelsproject='runs/detect',  # save results to project/namename='exp',  # save results to project/nameexist_ok=False,  # existing project/name ok, do not incrementline_thickness=3,  # bounding box thickness (pixels)hide_labels=False,  # hide labelshide_conf=False,  # hide confidenceshalf=False,  # use FP16 half-precision inference):"""opt参数详解weights:测试的模型权重文件data:数据集配置文件,数据集路径,类名等batch-size:前向传播时的批次, 默认32img-size:输入图片分辨率大小, 默认640conf-thres:筛选框的时候的置信度阈值, 默认0.001iou-thres:进行NMS的时候的IOU阈值, 默认0.65save-json:是否按照coco的json格式保存预测框,并且使用cocoapi做评估(需要同样coco的json格式的标签), 默认Falsetask:设置测试形式, 默认val, 具体可看下面代码解析注释device:测试的设备,cpu;0(表示一个gpu设备cuda:0);0,1,2,3(多个gpu设备)single-cls:数据集是否只有一个类别,默认Falseaugment:测试时是否使用TTA(test time augmentation), 默认Falsemerge:在进行NMS时,是否通过合并方式获得预测框, 默认Falseverbose:是否打印出每个类别的mAP, 默认Falsesave-txt:是否以txt文件的形式保存模型预测的框坐标, 默认False"""save_img = not nosave and not source.endswith('.txt')  # save inference imageswebcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))# Directoriessave_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir# Initializeset_logging()device = select_device(device)half &= device.type != 'cpu'  # half precision only supported on CUDA# Load modelw = weights[0] if isinstance(weights, list) else weightsclassify, pt, onnx = False, w.endswith('.pt'), w.endswith('.onnx')  # inference typestride, names = 64, [f'class{i}' for i in range(1000)]  # assign defaultsif pt:model = attempt_load(weights, map_location=device)  # load FP32 modelstride = int(model.stride.max())  # model stridenames = model.module.names if hasattr(model, 'module') else model.names  # get class namesif half:model.half()  # to FP16if 
classify:  # second-stage classifiermodelc = load_classifier(name='resnet50', n=2)  # initializemodelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval()elif onnx:check_requirements(('onnx', 'onnxruntime'))import onnxruntimesession = onnxruntime.InferenceSession(w, None)imgsz = check_img_size(imgsz, s=stride)  # check image size# Dataloaderif webcam:view_img = check_imshow()cudnn.benchmark = True  # set True to speed up constant image size inferencedataset = LoadStreams(source, img_size=imgsz, stride=stride)bs = len(dataset)  # batch_sizeelse:dataset = LoadImages(source, img_size=imgsz, stride=stride)bs = 1  # batch_sizevid_path, vid_writer = [None] * bs, [None] * bs# Run inferenceif pt and device.type != 'cpu':model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run oncet0 = time.time()for path, img, im0s, vid_cap in dataset:if pt:img = torch.from_numpy(img).to(device)img = img.half() if half else img.float()  # uint8 to fp16/32elif onnx:img = img.astype('float32')img /= 255.0  # 0 - 255 to 0.0 - 1.0if len(img.shape) == 3:img = img[None]  # expand for batch dim# Inferencet1 = time_sync()if pt:visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else Falsepred = model(img, augment=augment, visualize=visualize)[0]elif onnx:pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img}))# NMSpred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)t2 = time_sync()# Second-stage classifier (optional)if classify:pred = apply_classifier(pred, modelc, img, im0s)# Process predictionsfor i, det in enumerate(pred):  # detections per imageif webcam:  # batch_size >= 1p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.countelse:p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)p = Path(p)  # to Pathsave_path = str(save_dir / p.name)  # img.jpgtxt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txts += '%gx%g ' % img.shape[2:]  # print stringgn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwhimc = im0.copy() if save_crop else im0  # for save_cropif len(det):# Rescale boxes from img_size to im0 sizedet[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()# Print resultsfor c in det[:, -1].unique():n = (det[:, -1] == c).sum()  # detections per classs += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string# Write resultsfor *xyxy, conf, cls in reversed(det):if save_txt:  # Write to filexywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywhline = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label formatwith open(txt_path + '.txt', 'a') as f:f.write(('%g ' * len(line)).rstrip() % line + '\n')if save_img or save_crop or view_img:  # Add bbox to imagec = int(cls)  # integer classlabel = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=line_thickness)if save_crop:save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)# Print time (inference + NMS)print(f'{s}Done. 
({t2 - t1:.3f}s)')# Stream resultsif view_img:cv2.imshow(str(p), im0)cv2.waitKey(1)  # 1 millisecond# Save results (image with detections)if save_img:if dataset.mode == 'image':cv2.imwrite(save_path, im0)else:  # 'video' or 'stream'if vid_path[i] != save_path:  # new videovid_path[i] = save_pathif isinstance(vid_writer[i], cv2.VideoWriter):vid_writer[i].release()  # release previous video writerif vid_cap:  # videofps = vid_cap.get(cv2.CAP_PROP_FPS)w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))else:  # streamfps, w, h = 30, im0.shape[1], im0.shape[0]save_path += '.mp4'vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))vid_writer[i].write(im0)if save_txt or save_img:s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''print(f"Results saved to {colorstr('bold', save_dir)}{s}")if update:strip_optimizer(weights)  # update model (to fix SourceChangeWarning)print(f'Done. ({time.time() - t0:.3f}s)')def parse_opt():#这里参数可以修改parser = argparse.ArgumentParser()parser.add_argument('--weights', nargs='+', type=str, default='E:/555/555/yolov5-master/runs/train/exp24/weights/best.pt', help='model.pt path(s)') #打开训练完的自己数据集(best.pt)路径: E:/555/555/yolov5-master/runs/train/exp14/weights/best.ptparser.add_argument('--source', type=str, default='0', help='file/dir/URL/glob, 0 for webcam')#data/images,打开普通照片路径:E:/555/555/mydata3/test,打开摄像头:0parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')parser.add_argument('--view-img', action='store_true', help='show results')parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')parser.add_argument('--nosave', action='store_true', help='do not save images/videos')parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')parser.add_argument('--augment', action='store_true', help='augmented inference')parser.add_argument('--visualize', action='store_true', help='visualize features')parser.add_argument('--update', action='store_true', help='update all models')parser.add_argument('--project', default='runs/detect', help='save results to project/name')parser.add_argument('--name', default='exp', help='save results to project/name')parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')opt = parser.parse_args()return optdef main(opt):print(colorstr('detect: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))check_requirements(exclude=('tensorboard', 'thop'))run(**vars(opt))if __name__ == "__main__":opt = parse_opt()main(opt)
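For reference, a typical invocation of detect.py from the command line (paths are illustrative; substitute your own weights and source):

python detect.py --weights runs/train/exp24/weights/best.pt --source 0 --view-img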

#7 Video.py (every 3 seconds' worth of frames, capture a photo, run YOLOv5 detection and save the result)

import time

import cv2
import numpy as np
from PIL import Image

import detect
# from detect import parse_opt  # detect here is detect.py (the YOLOv5 detector); parse_opt holds the detection settings
# detect = parse_opt

capture = cv2.VideoCapture(0)
# capture = cv2.VideoCapture("D:/1.mp4")
if capture.isOpened():
    ref, frame = capture.read()
else:
    ref = False

fps = 0.0
timeF = 420  # YOLOv5 runs at about 140 frames per second (FPS) here, so 420 frames is about 3 seconds
c = 1
while ref:
    t1 = time.time()
    # Read one frame
    ref, frame = capture.read()
    # The image saved here carries no detection results; it is used to collect training data
    # and to check that the camera feed is sharp and stable
    if (c % timeF == 0):
        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %.2f" % (fps))
        frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        # Save every 420th frame to ./runs/img/in/
        cv2.imwrite("./runs/img/in/" + str(c) + '.jpg', frame)
        # Feed the saved ./runs/img/in/ image to detect; results are saved in ./runs/img/out
        detect.run(source="./runs/img/in/" + str(c) + '.jpg', name='../img/out/photo')
    c += 1
    # print(frame)
    #
    # # Convert BGR to RGB
    # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # # Convert to a PIL Image
    # frame = Image.fromarray(np.uint8(frame))
    # # Run detection
    # frame = np.array(detect.run(source=frame))
    # # Convert RGB back to BGR for OpenCV display
    # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    #
    # # The image saved here carries detection results, used to keep the detections
    # if (c % timeF == 0):
    #     cv2.imwrite("./runs/" + str(c) + '.jpg', frame)
    # c += 1
    #
    # fps = (fps + (1. / (time.time() - t1))) / 2
    # print("fps= %.2f" % (fps))
    # frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    # The image saved here carries both detection results and the fps value,
    # used to monitor detections at different frame rates
    # if (c % timeF == 0):
    #     cv2.imwrite("D:/photo/" + str(c) + '.jpg', frame)
    # c += 1

    # Show the camera feed
    cv2.imshow("video", frame)
    k = cv2.waitKey(1)
    # Press q to quit
    if k == ord('q'):
        capture.release()
        break
    # Press ESC to quit
    k = cv2.waitKey(1)
    if k == 27:
        capture.release()
        break
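A quick check on the sampling interval: at the 140 FPS quoted in the comment, c % timeF == 0 with timeF = 420 fires every 420 / 140 = 3 seconds. If your camera or model runs at a different rate, say 30 FPS, set timeF = 90 to keep the same 3-second interval.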
