diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/.keep" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/.keep" new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect.py" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect.py" new file mode 100644 index 0000000000000000000000000000000000000000..70c52dc5214b63700bfa5e4c0f5530cc996b2365 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect.py" @@ -0,0 +1,307 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run inference on images, videos, directories, streams, etc. + +Usage: + $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 +""" + +import argparse +import os +import sys +from pathlib import Path + +import cv2 +import numpy as np +import torch +import torch.backends.cudnn as cudnn + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.experimental import attempt_load +from utils.datasets import LoadImages, LoadStreams +from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ + increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ + strip_optimizer, xyxy2xywh +from utils.plots import Annotator, colors +from utils.torch_utils import load_classifier, select_device, time_sync + + +@torch.no_grad() +def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + imgsz=640, # inference size (pixels) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/detect', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + ): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( + ('rtsp://', 'rtmp://', 'http://', 'https://')) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Initialize + set_logging() + device = select_device(device) + half &= device.type != 'cpu' # half precision only supported on CUDA + + # Load model + w = str(weights[0] if isinstance(weights, list) else weights) + classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] + check_suffix(w, suffixes) # check weights have acceptable suffix + pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans + stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + if pt: + model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) + stride = int(model.stride.max()) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + if half: + model.half() # to FP16 + if classify: # second-stage classifier + modelc = load_classifier(name='resnet50', n=2) # initialize + modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() + elif onnx: + if dnn: + check_requirements(('opencv-python>=4.5.4',)) + net = cv2.dnn.readNetFromONNX(w) + else: + check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime')) + import onnxruntime + session = onnxruntime.InferenceSession(w, None) + else: # TensorFlow models + check_requirements(('tensorflow>=2.4.1',)) + import tensorflow as tf + if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import + return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), + tf.nest.map_structure(x.graph.as_graph_element, outputs)) + + graph_def = tf.Graph().as_graph_def() + graph_def.ParseFromString(open(w, 'rb').read()) + frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") + elif saved_model: + model = tf.keras.models.load_model(w) + elif tflite: + interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = 
interpreter.get_output_details() # outputs + int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + if webcam: + view_img = check_imshow() + cudnn.benchmark = True # set True to speed up constant image size inference + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) + bs = len(dataset) # batch_size + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + if pt and device.type != 'cpu': + model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once + dt, seen = [0.0, 0.0, 0.0], 0 + for path, img, im0s, vid_cap in dataset: + t1 = time_sync() + if onnx: + img = img.astype('float32') + else: + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if len(img.shape) == 3: + img = img[None] # expand for batch dim + t2 = time_sync() + dt[0] += t2 - t1 + + # Inference + if pt: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(img, augment=augment, visualize=visualize)[0] + elif onnx: + if dnn: + net.setInput(img) + pred = torch.tensor(net.forward()) + else: + pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) + else: # tensorflow model (tflite, pb, saved_model) + imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy + if pb: + pred = frozen_func(x=tf.constant(imn)).numpy() + elif saved_model: + pred = model(imn, training=False).numpy() + elif tflite: + if int8: + scale, zero_point = input_details[0]['quantization'] + imn = (imn / scale + zero_point).astype(np.uint8) # de-scale + interpreter.set_tensor(input_details[0]['index'], imn) + interpreter.invoke() + pred = interpreter.get_tensor(output_details[0]['index']) + if int8: + scale, zero_point = output_details[0]['quantization'] + pred = (pred.astype(np.float32) - zero_point) * scale # re-scale + pred[..., 0] *= imgsz[1] # x + pred[..., 1] *= imgsz[0] # y + pred[..., 2] *= imgsz[1] # w + pred[..., 3] *= imgsz[0] # h + pred = torch.tensor(pred) + t3 = time_sync() + dt[1] += t3 - t2 + + # NMS + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) + dt[2] += time_sync() - t3 + + # Second-stage classifier (optional) + if classify: + pred = apply_classifier(pred, modelc, img, im0s) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count + else: + p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # img.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt + s += '%gx%g ' % img.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " 
# add to string + + # Write results + for *xyxy, conf, cls in reversed(det): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(txt_path + '.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Print time (inference-only) + print(f'{s}Done. ({t3 - t2:.3f}s)') + + # Stream results + im0 = annotator.result() + if view_img: + cv2.imshow(str(p), im0) + cv2.waitKey(1) # 1 millisecond + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path += '.mp4' + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print results + t = tuple(x / seen * 1E3 for x in dt) # speeds per image + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + print(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 
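detect.py above is normally driven from the command line as in its docstring; the same run() entry point can also be called directly from Python. The sketch below is only an illustration: the checkpoint path runs/train/exp/weights/best.pt is an assumption (no trained weights are included in this commit), and images/test refers to the test images added in this folder.

    # Hypothetical driver for the run() function defined in detect.py above.
    # The weights path is an assumption; point it at your own trained checkpoint.
    from detect import run

    run(weights='runs/train/exp/weights/best.pt',  # assumed trained YOLOv5 checkpoint
        source='images/test',                      # test images added in this commit
        imgsz=[640, 640],                          # inference size (h, w), as a list, like parse_opt() builds
        conf_thres=0.25,                           # confidence threshold
        save_txt=True)                             # also write YOLO-format .txt detections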
diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/0001.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/0001.jpg"
new file mode 100644
index 0000000000000000000000000000000000000000..cb92ae88da43bd4892fe814aaa4ec0235591fb42
Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/0001.jpg" differ
diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/0002.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/0002.jpg"
new file mode 100644
index 0000000000000000000000000000000000000000..d4640dbf296e375d0752fb6b05355ae3d81ffee1
Binary files /dev/null and
"b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/0002.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/0003.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/0003.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..273d42707e538fc45713788e43d218b01c7e1839 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/0003.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/1.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/1.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..290bb4bbb7085a8eedc462e4d5832846683cb6ac Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/1.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/2.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/2.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..638a3615c56bdadd3efb208cdb62ae9b9850c936 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/detect/2.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0001.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0001.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..9205faf943739144b654edb715c2a8a1bb6dbd22 Binary files /dev/null and 
"b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0001.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0002.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0002.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..67e2f77618dc68bc74ed734170baf22b6b450653 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0002.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0003.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0003.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..9ed8f33e77c6a8517b9774416dc0b1b49fc60e6e Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0003.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0004.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0004.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..0c33b338689af9d72a32eb4632b0f1b5c6b0d3b8 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0004.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0005.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0005.jpg" new file mode 100644 index 
0000000000000000000000000000000000000000..48439db02a53f7f28d8b2cb8e4095fa6e91c173e Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/0005.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/1.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/1.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..6e2c5e263e502de256c33780250818806dd4ccbf Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/1.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/2.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/2.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..36522e8eccd5fafc181ec94fc33cc18b53c9fda9 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/2.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/3.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/3.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..195a0922597d43944ec53da1558b81baa65a8b33 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/3.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/4.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/4.jpg" new file mode 
100644 index 0000000000000000000000000000000000000000..30e02ded3ad170061f8cefa02782fafa1f3b9c1b Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/4.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/5.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/5.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..3fb65113ffa78e64857dabc297b1626b79220fe8 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/test/5.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/1.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/1.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..8d5f415dad48441f4a0859f0583af0678fbead0b Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/1.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/10.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/10.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..d2ac21e76be6b5dc0b52e73d7f13374ca35e27bf Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/10.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/2.jpg" 
"b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/2.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..6bff2de91339d85037fb78f3e62f9438e0ea60af Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/2.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/3.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/3.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..7dfed7e340abeb68b9cb123b3dae054ac94e52a6 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/3.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/4.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/4.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..2eba198140af2c2ca013a35cdf5b443b19db4408 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/4.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/5.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/5.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..1d75ebaa38c3598785f78a23aca68f6cb360c616 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/5.jpg" differ diff --git 
"a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/6.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/6.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..bd3790fb77d0a9ce906ee2e926d8ef096991a70b Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/6.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/7.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/7.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..300c96345a4d960637356cd4d2409be084409d85 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/7.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/8.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/8.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..4f72bc4238623c591dcecd6980af4c97bcd8cfb7 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/8.jpg" differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/9.jpg" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/9.jpg" new file mode 100644 index 0000000000000000000000000000000000000000..466235790ea6c422177c731f923d8e12b0705048 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/images/train/9.jpg" 
differ diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/1.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/1.txt" new file mode 100644 index 0000000000000000000000000000000000000000..5de5a38431c7bcb836f87bfb46e3fc6b0ab2b4a2 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/1.txt" @@ -0,0 +1,6 @@ +0 0.93046875 0.36484375 0.0546875 0.0484375 +0 0.5109375 0.7609375 0.0375 0.05625 +0 0.146875 0.14765625 0.040625 0.0453125 +0 0.4609375 0.44609375 0.046875 0.0421875 +0 0.78984375 0.953125 0.0390625 0.053125 +0 0.3671875 0.2625 0.0375 0.053125 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/10.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/10.txt" new file mode 100644 index 0000000000000000000000000000000000000000..5604f731e44ade42cb2edc8010092893462281d1 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/10.txt" @@ -0,0 +1,7 @@ +0 0.33203125 0.78515625 0.0609375 0.0578125 +0 0.759375 0.65703125 0.05 0.0515625 +0 0.60078125 0.2140625 0.0546875 0.04375 +0 0.08828125 0.43515625 0.0578125 0.0546875 +0 0.48359375 0.840625 0.0390625 0.05 +0 0.54921875 0.30625 0.0453125 0.040625 +0 0.2 0.90546875 0.05 0.0578125 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/2.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/2.txt" new file mode 100644 index 0000000000000000000000000000000000000000..33abade06916f6bfe2ee73fb998a7139014ce0ac --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/2.txt" @@ -0,0 +1,5 @@ +0 0.8609375 0.82109375 0.059375 0.0546875 +0 0.48515625 0.5234375 0.0546875 0.05 +0 0.30546875 0.7515625 0.0421875 0.059375 +0 0.7953125 0.19453125 0.040625 0.0609375 +0 0.3875 0.275 0.04375 0.053125 diff --git 
"a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/3.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/3.txt" new file mode 100644 index 0000000000000000000000000000000000000000..8dfc54e6fc1a8c298e7a82a271794efae3a2dab0 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/3.txt" @@ -0,0 +1,8 @@ +0 0.82421875 0.83671875 0.0484375 0.0484375 +0 0.07265625 0.11640625 0.0484375 0.0609375 +0 0.55 0.24921875 0.05625 0.0390625 +0 0.290625 0.178125 0.046875 0.053125 +0 0.43046875 0.55234375 0.0546875 0.0578125 +0 0.36015625 0.6890625 0.0453125 0.053125 +0 0.95703125 0.034375 0.0578125 0.053125 +0 0.18515625 0.9703125 0.0609375 0.046875 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/4.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/4.txt" new file mode 100644 index 0000000000000000000000000000000000000000..2e024f3656c599fe7cb3603b8d81dad531fefe67 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/4.txt" @@ -0,0 +1,5 @@ +0 0.95234375 0.5953125 0.0421875 0.05625 +0 0.61875 0.13671875 0.0375 0.0515625 +0 0.53046875 0.91796875 0.0546875 0.0578125 +0 0.0390625 0.4171875 0.05 0.059375 +0 0.453125 0.5265625 0.05 0.053125 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/5.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/5.txt" new file mode 100644 index 0000000000000000000000000000000000000000..5790c561df203a42c9e9eb63914d88847e49ab1c --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/5.txt" @@ -0,0 +1,5 @@ +0 0.97265625 0.88359375 0.0421875 0.0546875 +0 0.3375 0.6875 0.05625 0.059375 +0 0.78046875 0.2703125 0.0421875 0.053125 +0 0.48046875 0.46640625 0.0609375 0.0390625 +0 0.10859375 0.08203125 0.0609375 0.0546875 diff --git 
"a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/6.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/6.txt" new file mode 100644 index 0000000000000000000000000000000000000000..22d2c22b8d41e64806a821bbbc7c20e76a9f6d5e --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/6.txt" @@ -0,0 +1,5 @@ +0 0.78671875 0.1921875 0.0546875 0.04375 +0 0.490625 0.59375 0.059375 0.0375 +0 0.1140625 0.81640625 0.046875 0.0484375 +0 0.3890625 0.4296875 0.04375 0.05 +0 0.1109375 0.76015625 0.053125 0.0546875 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/7.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/7.txt" new file mode 100644 index 0000000000000000000000000000000000000000..0cc89f3a22764ba321a8b979567334008f23fdd0 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/7.txt" @@ -0,0 +1,5 @@ +0 0.21640625 0.43671875 0.0421875 0.0546875 +0 0.7671875 0.78203125 0.040625 0.0609375 +0 0.6109375 0.54375 0.05625 0.059375 +0 0.90859375 0.303125 0.0515625 0.053125 +0 0.34765625 0.68515625 0.0578125 0.0609375 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/8.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/8.txt" new file mode 100644 index 0000000000000000000000000000000000000000..133f26d3a666b2bb176bd593f68881b20b3c43d8 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/8.txt" @@ -0,0 +1,6 @@ +0 0.77890625 0.13828125 0.0484375 0.0578125 +0 0.22578125 0.87890625 0.0515625 0.0453125 +0 0.88828125 0.6328125 0.0515625 0.0375 +0 0.090625 0.2640625 0.059375 0.053125 +0 0.3625 0.54609375 0.053125 0.0421875 +0 0.7265625 0.378125 0.05625 0.0375 diff --git 
"a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/9.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/9.txt" new file mode 100644 index 0000000000000000000000000000000000000000..790aa7549b0a246b8c55360dab64f877c0a127cd --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/train/9.txt" @@ -0,0 +1,8 @@ +0 0.85703125 0.728125 0.0421875 0.059375 +0 0.03515625 0.44296875 0.0484375 0.0609375 +0 0.79453125 0.1421875 0.0578125 0.040625 +0 0.56953125 0.884375 0.0453125 0.059375 +0 0.30625 0.03671875 0.04375 0.0390625 +0 0.9609375 0.79765625 0.053125 0.0609375 +0 0.63203125 0.28125 0.0390625 0.046875 +0 0.63046875 0.95390625 0.0546875 0.0421875 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5933.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5933.txt" new file mode 100644 index 0000000000000000000000000000000000000000..2c9017bb5e3f8e3791efa34a49bded0b5ec2f64a --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5933.txt" @@ -0,0 +1 @@ +0 0.115449 0.417752 0.038206 0.037459 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5934.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5934.txt" new file mode 100644 index 0000000000000000000000000000000000000000..c4e74d8f738c017f2972dd688d89ffe10029d1d7 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5934.txt" @@ -0,0 +1 @@ +0 0.885382 0.586319 0.046512 0.042345 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5935.txt" 
"b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5935.txt" new file mode 100644 index 0000000000000000000000000000000000000000..9ae20531829f8336920ffcf3910617191e2cfda3 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5935.txt" @@ -0,0 +1 @@ +0 0.379368 0.451907 0.039933 0.041459 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5936.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5936.txt" new file mode 100644 index 0000000000000000000000000000000000000000..d46e6156560b19c198f69913fa592a89c24673f9 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5936.txt" @@ -0,0 +1 @@ +0 0.066556 0.034711 0.049917 0.036364 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5937.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5937.txt" new file mode 100644 index 0000000000000000000000000000000000000000..be53163ec20e85d8e92b29137f7b968e383a9abe --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5937.txt" @@ -0,0 +1 @@ +0 0.922405 0.062678 0.053592 0.048433 \ No newline at end of file diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5938.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5938.txt" new file mode 100644 index 0000000000000000000000000000000000000000..4332c8abd1cf805f6941f46bf2dfc137d25916a4 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5938.txt" @@ -0,0 +1 @@ +0 0.408844 0.307692 0.052707 0.031339 \ No newline at end of file diff --git 
"a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5939.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5939.txt" new file mode 100644 index 0000000000000000000000000000000000000000..d63612d6824c3698347793e3e32a9e21b3aa0af3 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5939.txt" @@ -0,0 +1 @@ +0 0.175207 0.902985 0.049587 0.034826 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5940.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5940.txt" new file mode 100644 index 0000000000000000000000000000000000000000..b81310baebe4df539c46409a91cf217f21498260 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5940.txt" @@ -0,0 +1 @@ +0 0.948739 0.476705 0.058824 0.041597 diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5941.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5941.txt" new file mode 100644 index 0000000000000000000000000000000000000000..c73c7a3a8098ea911add684663b4588678c8fe09 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5941.txt" @@ -0,0 +1 @@ +0 0.112548 0.523504 0.055556 0.035613 \ No newline at end of file diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5942.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5942.txt" new file mode 100644 index 0000000000000000000000000000000000000000..53686ee91c39e6e16554c6badf783995556cffe1 --- /dev/null +++ 
"b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/labels/val/5942.txt" @@ -0,0 +1 @@ +0 0.949921 0.054131 0.052184 0.039886 \ No newline at end of file diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/readme.txt" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/readme.txt" new file mode 100644 index 0000000000000000000000000000000000000000..59c836d73b756259241b18755b20a0897e04c2cd --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/readme.txt" @@ -0,0 +1,5 @@ +源代码来自github开源 +https://github.com/ultralytics/yolov5 + +链接: https://pan.baidu.com/s/1tgH_d1355ersZHNIwasOAg 提取码: 8f6l +--来自百度网盘超级会员v5的分享 \ No newline at end of file diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/train.py" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/train.py" new file mode 100644 index 0000000000000000000000000000000000000000..292f2da965f0d998bed65d6b2ea6cfd82b206f69 --- /dev/null +++ "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/train.py" @@ -0,0 +1,627 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 model on a custom dataset + +Usage: + $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 +""" + +import argparse +import logging +import math +import os +import random +import sys +import time +from copy import deepcopy +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.cuda import amp +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.optim import Adam, SGD, lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import val # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import Model +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.datasets import create_dataloader +from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ + strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \ + check_file, check_yaml, 
check_suffix, print_args, print_mutation, set_logging, one_cycle, colorstr, methods +from utils.downloads import attempt_download +from utils.loss import ComputeLoss +from utils.plots import plot_labels, plot_evolve +from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, intersect_dicts, select_device, \ + torch_distributed_zero_first +from utils.loggers.wandb.wandb_utils import check_wandb_resume +from utils.metrics import fitness +from utils.loggers import Loggers +from utils.callbacks import Callbacks + +LOGGER = logging.getLogger(__name__) +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def train(hyp, # path/to/hyp.yaml or hyp dictionary + opt, + device, + callbacks + ): + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + + # Save run settings + with open(save_dir / 'hyp.yaml', 'w') as f: + yaml.safe_dump(hyp, f, sort_keys=False) + with open(save_dir / 'opt.yaml', 'w') as f: + yaml.safe_dump(vars(opt), f, sort_keys=False) + data_dict = None + + # Loggers + if RANK in [-1, 0]: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + if loggers.wandb: + data_dict = loggers.wandb.data_dict + if resume: + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp + + # Register actions + for k in methods(loggers): + callbacks.register_action(k, callback=getattr(loggers, k)) + + # Config + plots = not evolve # create plots + cuda = device.type != 'cpu' + init_seeds(1 + RANK) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check + is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location=device) # load checkpoint + model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = Model(cfg, ch=3, nc=nc, 
anchors=hyp.get('anchors')).to(device) # create + + # Freeze + freeze = [f'model.{x}.' for x in range(freeze)] # layers to freeze + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + if any(x in k for x in freeze): + print(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}") + + g0, g1, g2 = [], [], [] # optimizer parameter groups + for v in model.modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias + g2.append(v.bias) + if isinstance(v, nn.BatchNorm2d): # weight (no decay) + g0.append(v.weight) + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) + g1.append(v.weight) + + if opt.adam: + optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + else: + optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) + + optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']}) # add g1 with weight_decay + optimizer.add_param_group({'params': g2}) # add g2 (biases) + LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " + f"{len(g0)} weight, {len(g1)} weight (no decay), {len(g2)} bias") + del g0, g1, g2 + + # Scheduler + if opt.linear_lr: + lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + else: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in [-1, 0] else None + + # Resume + start_epoch, best_fitness = 0, 0.0 + if pretrained: + # Optimizer + if ckpt['optimizer'] is not None: + optimizer.load_state_dict(ckpt['optimizer']) + best_fitness = ckpt['best_fitness'] + + # EMA + if ema and ckpt.get('ema'): + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) + ema.updates = ckpt['updates'] + + # Epochs + start_epoch = ckpt['epoch'] + 1 + if resume: + assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.' + if epochs < start_epoch: + LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. 
Fine-tuning for {epochs} more epochs.") + epochs += ckpt['epoch'] # finetune additional epochs + + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, + hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=LOCAL_RANK, + workers=workers, image_weights=opt.image_weights, quad=opt.quad, + prefix=colorstr('train: ')) + mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class + nb = len(train_loader) # number of batches + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in [-1, 0]: + val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, + hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1, + workers=workers, pad=0.5, + prefix=colorstr('val: '))[0] + + if not resume: + labels = np.concatenate(dataset.labels, 0) + # c = torch.tensor(labels[:, 0]) # classes + # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency + # model._initialize_biases(cf.to(device)) + if plots: + plot_labels(labels, names, save_dir) + + # Anchors + if not opt.noautoanchor: + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + model.half().float() # pre-reduce anchor precision + + callbacks.run('on_pretrain_routine_end') + + # DDP mode + if cuda and RANK != -1: + model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) + + # Model parameters + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + hyp['box'] *= 3. / nl # scale to layers + hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers + hyp['obj'] *= (imgsz / 640) ** 2 * 3. 
/ nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = amp.GradScaler(enabled=cuda) + stopper = EarlyStopping(patience=opt.patience) + compute_loss = ComputeLoss(model) # init loss class + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(3, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) + if RANK in [-1, 0]: + pbar = tqdm(pbar, total=nb) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with amp.autocast(enabled=cuda): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + 
if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize + if ni - last_opt_step >= accumulate: + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in [-1, 0]: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( + f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn) + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in [-1, 0]: + # mAP + callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = val.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = {'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0): + torch.save(ckpt, w / f'epoch{epoch}.pt') + del ckpt + callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # Stop Single-GPU + if RANK == -1 and stopper(epoch=epoch, fitness=fi): + break + + # Stop DDP TODO: known issues shttps://github.com/ultralytics/yolov5/pull/4576 + # stop = stopper(epoch=epoch, fitness=fi) + # if RANK == 0: + # dist.broadcast_object_list([stop], 0) # broadcast 'stop' to all ranks + + # Stop DPP + # with torch_distributed_zero_first(RANK): + # if stop: + # break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in [-1, 0]: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = val.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 + single_cls=single_cls, + dataloader=val_loader, + 
save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=True, + callbacks=callbacks, + compute_loss=compute_loss) # val best model with plots + if is_coco: + callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + + callbacks.run('on_train_end', last, best, plots, epoch, results) + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=300) + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--linear-lr', action='store_true', help='linear LR') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. 
backbone=10, all=24') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') + + # Weights & Biases arguments + parser.add_argument('--entity', default=None, help='W&B: Entity') + parser.add_argument('--upload_dataset', action='store_true', help='W&B: Upload dataset as artifact table') + parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + + opt = parser.parse_known_args()[0] if known else parser.parse_args() + return opt + + +def main(opt, callbacks=Callbacks()): + # Checks + set_logging(RANK) + if RANK in [-1, 0]: + print_args(FILE.stem, opt) + check_git_status() + check_requirements(exclude=['thop']) + + # Resume + if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run + ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path + assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' + with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f: + opt = argparse.Namespace(**yaml.safe_load(f)) # replace + opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate + LOGGER.info(f'Resuming training from {ckpt}') + else: + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + if opt.evolve: + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' + assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' + assert not opt.evolve, '--evolve argument is not compatible with DDP training' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + if WORLD_SIZE > 1 and RANK == 0: + LOGGER.info('Destroying process group... 
') + dist.destroy_process_group() + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + if opt.bucket: + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {save_dir}') # download evolve.csv if exists + + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * 
npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, callbacks) + + # Write mutation results + print_mutation(results, hyp.copy(), save_dir, opt.bucket) + + # Plot results + plot_evolve(evolve_csv) + print(f'Hyperparameter evolution finished\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git "a/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/yolo.zip" "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/yolo.zip" new file mode 100644 index 0000000000000000000000000000000000000000..39fa54cf81110c841a1ec551e951a686dd008570 Binary files /dev/null and "b/code/2021_autumn/\345\255\224\344\273\244\346\230\212-\346\267\261\345\272\246\345\255\246\344\271\240\345\234\250\347\274\226\347\240\201\347\202\271\350\257\206\345\210\253\344\270\255\347\232\204\345\272\224\347\224\250/yolo.zip" differ
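Usage note (an illustrative sketch, not part of the patch above): the added train.py ends with a run() helper ("# Usage: import train; train.run(...)") for launching training programmatically instead of via the CLI shown in its docstring. The snippet below assumes a hypothetical dataset YAML named coding_points.yaml describing the single-class coding-point labels added under labels/; that file, its paths, and the class name are assumptions for illustration and are not included in this commit.

    import train  # the train.py added by this patch

    # Assumed single-class dataset config (coding_points.yaml), matching the
    # YOLO-format labels/{train,val}/*.txt files above:
    #   train: images/train
    #   val: images/val
    #   nc: 1
    #   names: ['point']

    train.run(
        data='coding_points.yaml',  # assumed dataset YAML, not part of this commit
        weights='yolov5s.pt',       # pretrained checkpoint, as in the Usage docstring
        imgsz=640,                  # train/val image size (pixels)
        epochs=300,                 # parse_opt() default
        batch_size=16,              # parse_opt() default; -1 enables autobatch
    )

The equivalent command-line call, consistent with the docstring in train.py, would be roughly: $ python train.py --data coding_points.yaml --weights yolov5s.pt --img 640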