09 API Setup and Course Summary
@ -1,8 +1,7 @@
|
||||
import torch
|
||||
|
||||
|
||||
t5 = torch.randint(low=100, high=1000, size=(2, 3, 4))
|
||||
print(t5.numpy())
|
||||
|
||||
t6 = torch.tensor([[[[[[[[[[[100]]]]]]]]]]])
|
||||
print(t6.item())
|
||||
t6 = torch.tensor([41, 96, 88, 145], dtype=torch.int32)
|
||||
print(t6.tolist())
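A quick aside on the three conversion methods used above (a minimal illustrative sketch, not part of the original lesson file): numpy() returns an ndarray, item() only works on single-element tensors, and tolist() returns nested Python lists.
# Illustrative sketch (assumed example, not from the original lesson code)
t = torch.tensor([[1, 2], [3, 4]])
print(t.numpy().shape)  # (2, 2) -- ndarray view of the tensor data
print(t.sum().item())   # 10 -- item() requires a single-element tensor
print(t.tolist())       # [[1, 2], [3, 4]] -- plain nested Python lists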
|
||||
|
@ -1,15 +1,19 @@
|
||||
import os
|
||||
|
||||
|
||||
# Step 1: check the directory environment to make sure it meets the processing requirements
|
||||
def file_exists(filename, message):
|
||||
if not os.path.exists(filename):
|
||||
raise FileExistsError(message)
|
||||
|
||||
|
||||
def make_dirs(dirs):
|
||||
try:
|
||||
os.makedirs(dirs)
|
||||
except FileExistsError:
|
||||
print(dirs + ' directory already exists, skipping')
|
||||
|
||||
|
||||
# Step 2: create the key directories
|
||||
file_exists('Annotations', 'VOC annotations: the Annotations directory does not exist')
|
||||
file_exists('JPEGImages', 'VOC annotations: the JPEGImages image directory does not exist')
|
||||
@ -17,4 +21,3 @@ make_dirs('images/train')
|
||||
make_dirs('images/val')
|
||||
make_dirs('labels/train')
|
||||
make_dirs('labels/val')
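For orientation, the layout these calls are meant to produce looks roughly like this (a sketch assuming the VOC2012 dataset root seen in the paths later in this diff):
# Expected layout after this step (sketch):
# VOC2012/
#   Annotations/     # existing VOC xml annotations (checked above)
#   JPEGImages/      # existing source images (checked above)
#   images/train/    # created here; training images are copied/moved in later
#   images/val/      # created here; validation images
#   labels/train/    # created here; YOLO txt labels for the training set
#   labels/val/      # created here; YOLO txt labels for the validation set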
|
||||
|
||||
|
@ -5,7 +5,7 @@ import shutil
|
||||
|
||||
# Collect the images and split them into a training set and a validation set: train / validation
|
||||
Images = os.listdir('JPEGImages')
|
||||
Images = [i for i in Images if i.split('.')[-1] in ('jpg', 'png')]  # keep only jpg/png files
|
||||
print('Extracted {} valid images in total'.format(len(Images)))
|
||||
# Split the images according to the ratio below (train/validation); adjust it if needed, or leave it as is
|
||||
distribution_rate = 0.9
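The split itself happens in code that falls outside this hunk; a minimal sketch of how a 90/10 split could look (illustrative only — the shuffle and slicing here are assumptions, though train and val are the names used further down):
# Illustrative sketch of the split (assumed implementation, not from this hunk)
import random
random.shuffle(Images)                               # randomize before splitting
split_index = int(len(Images) * distribution_rate)   # 90% of the images go to training
train, val = Images[:split_index], Images[split_index:]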
|
||||
@ -41,15 +41,16 @@ with open('val.txt', 'w', encoding='utf-8') as f:
|
||||
|
||||
print('Image move/copy finished; training index files train.txt and val.txt have been generated')
|
||||
|
||||
|
||||
# Pre-check the correspondence between xml files and images; a strict one-to-one match is required here
|
||||
|
||||
|
||||
xml_file = os.listdir('Annotations')
|
||||
xml_file = [i for i in xml_file if i.split('.')[-1] == 'xml']
|
||||
xml_file_check = [i.split('.')[0] + '.xml' for i in Images if i.split('.')[-1] in ('jpg', 'png')]
|
||||
if sorted(xml_file_check) != sorted(xml_file):  # compare sorted lists, since os.listdir() order is not guaranteed
|
||||
raise FileExistsError('The xml files in Annotations do not correspond to the images in JPEGImages, please check carefully!')
|
||||
|
||||
|
||||
# Next, extract the xml annotations and generate the YOLO label files
|
||||
def convert(size, box):
|
||||
dw = 1. / (size[0])
|
||||
@ -64,6 +65,7 @@ def convert(size, box):
|
||||
h = h * dh
|
||||
return x, y, w, h
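As a sanity check, assuming convert() follows the common VOC-to-YOLO formula (box center minus one, then normalized by the image size; the middle of the function sits outside this hunk), its output matches the label files removed further down:
# Worked example (sketch): first annotation xml below, 150x150 image, box xmin=56, xmax=100, ymin=86, ymax=135
# x = ((56 + 100) / 2 - 1) / 150 = 0.51333...
# y = ((86 + 135) / 2 - 1) / 150 = 0.73
# w = (100 - 56) / 150           = 0.29333...
# h = (135 - 86) / 150           = 0.32666...
# which is exactly the removed label line "0 0.5133... 0.7300... 0.2933... 0.3266..."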
|
||||
|
||||
|
||||
def write_labels(xml_file_path, write_to_file_path):
|
||||
with open(xml_file_path, 'r', encoding='utf-8') as f:
|
||||
tree = ET.parse(f)
|
||||
@ -79,7 +81,8 @@ def write_labels(xml_file_path, write_to_file_path):
|
||||
continue
|
||||
cls_id = classes.index(xml_name)
|
||||
xmlbox = obj.find('bndbox')
|
||||
b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
|
||||
b1, b2, b3, b4 = b
|
||||
# 标注越界修正
|
||||
if b2 > w:
|
||||
@ -91,7 +94,9 @@ def write_labels(xml_file_path, write_to_file_path):
|
||||
write_message = str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n'
|
||||
f2.write(write_message)
|
||||
if not write_message:
|
||||
logging.warning('No class content was found in the annotation xml for this image; take this warning seriously, it likely means the classes list does not match (classes error)')
|
||||
|
||||
|
||||
for i in train:
|
||||
write_labels('Annotations/' + i.split('.')[0] + '.xml', 'labels/train/{}'.format(i.split('.')[0] + '.txt'))
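The matching loop for the validation split is not visible in this hunk; presumably it mirrors the line above (an assumption, shown here only for completeness):
# Presumed counterpart for the validation set (not shown in this hunk):
# for i in val:
#     write_labels('Annotations/' + i.split('.')[0] + '.xml', 'labels/val/{}'.format(i.split('.')[0] + '.txt'))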
|
||||
|
@ -1,28 +0,0 @@
|
||||
<?xml version="1.0" ?>
|
||||
<annotation>
|
||||
<folder>JPEGImages</folder>
|
||||
<filename>16329967796329784.jpg</filename>
|
||||
<path>E:\code\js_reverse\ayf_ocr\yolo_databases\VOC2012\JPEGImages\16329967796329784.jpg</path>
|
||||
<source>
|
||||
<database>Unknown</database>
|
||||
</source>
|
||||
<size>
|
||||
<width>150</width>
|
||||
<height>150</height>
|
||||
<depth>3</depth>
|
||||
</size>
|
||||
|
||||
<segmented>0</segmented>
|
||||
<object>
|
||||
<name>缺口</name>
|
||||
<pose>Unspecified</pose>
|
||||
<truncated>0</truncated>
|
||||
<difficult>0</difficult>
|
||||
<bndbox>
|
||||
<xmin>56</xmin>
|
||||
<ymin>86</ymin>
|
||||
<xmax>100</xmax>
|
||||
<ymax>135</ymax>
|
||||
</bndbox>
|
||||
</object>
|
||||
</annotation>
|
@ -1,28 +0,0 @@
|
||||
<?xml version="1.0" ?>
|
||||
<annotation>
|
||||
<folder>JPEGImages</folder>
|
||||
<filename>16329967796409855.jpg</filename>
|
||||
<path>E:\code\js_reverse\ayf_ocr\yolo_databases\VOC2012\JPEGImages\16329967796409855.jpg</path>
|
||||
<source>
|
||||
<database>Unknown</database>
|
||||
</source>
|
||||
<size>
|
||||
<width>150</width>
|
||||
<height>150</height>
|
||||
<depth>3</depth>
|
||||
</size>
|
||||
|
||||
<segmented>0</segmented>
|
||||
<object>
|
||||
<name>缺口</name>
|
||||
<pose>Unspecified</pose>
|
||||
<truncated>0</truncated>
|
||||
<difficult>0</difficult>
|
||||
<bndbox>
|
||||
<xmin>51</xmin>
|
||||
<ymin>42</ymin>
|
||||
<xmax>97</xmax>
|
||||
<ymax>93</ymax>
|
||||
</bndbox>
|
||||
</object>
|
||||
</annotation>
|
@ -1,28 +0,0 @@
|
||||
<?xml version="1.0" ?>
|
||||
<annotation>
|
||||
<folder>JPEGImages</folder>
|
||||
<filename>16329967796494925.jpg</filename>
|
||||
<path>E:\code\js_reverse\ayf_ocr\yolo_databases\VOC2012\JPEGImages\16329967796494925.jpg</path>
|
||||
<source>
|
||||
<database>Unknown</database>
|
||||
</source>
|
||||
<size>
|
||||
<width>150</width>
|
||||
<height>150</height>
|
||||
<depth>3</depth>
|
||||
</size>
|
||||
|
||||
<segmented>0</segmented>
|
||||
<object>
|
||||
<name>缺口</name>
|
||||
<pose>Unspecified</pose>
|
||||
<truncated>0</truncated>
|
||||
<difficult>0</difficult>
|
||||
<bndbox>
|
||||
<xmin>73</xmin>
|
||||
<ymin>30</ymin>
|
||||
<xmax>119</xmax>
|
||||
<ymax>85</ymax>
|
||||
</bndbox>
|
||||
</object>
|
||||
</annotation>
|
@ -1,28 +0,0 @@
|
||||
<?xml version="1.0" ?>
|
||||
<annotation>
|
||||
<folder>JPEGImages</folder>
|
||||
<filename>16329967796625037.jpg</filename>
|
||||
<path>E:\code\js_reverse\ayf_ocr\yolo_databases\VOC2012\JPEGImages\16329967796625037.jpg</path>
|
||||
<source>
|
||||
<database>Unknown</database>
|
||||
</source>
|
||||
<size>
|
||||
<width>150</width>
|
||||
<height>150</height>
|
||||
<depth>3</depth>
|
||||
</size>
|
||||
|
||||
<segmented>0</segmented>
|
||||
<object>
|
||||
<name>缺口</name>
|
||||
<pose>Unspecified</pose>
|
||||
<truncated>0</truncated>
|
||||
<difficult>0</difficult>
|
||||
<bndbox>
|
||||
<xmin>55</xmin>
|
||||
<ymin>82</ymin>
|
||||
<xmax>100</xmax>
|
||||
<ymax>130</ymax>
|
||||
</bndbox>
|
||||
</object>
|
||||
</annotation>
|
@ -1,28 +0,0 @@
|
||||
<?xml version="1.0" ?>
|
||||
<annotation>
|
||||
<folder>JPEGImages</folder>
|
||||
<filename>16329967796715117.jpg</filename>
|
||||
<path>E:\code\js_reverse\ayf_ocr\yolo_databases\VOC2012\JPEGImages\16329967796715117.jpg</path>
|
||||
<source>
|
||||
<database>Unknown</database>
|
||||
</source>
|
||||
<size>
|
||||
<width>150</width>
|
||||
<height>150</height>
|
||||
<depth>3</depth>
|
||||
</size>
|
||||
|
||||
<segmented>0</segmented>
|
||||
<object>
|
||||
<name>缺口</name>
|
||||
<pose>Unspecified</pose>
|
||||
<truncated>0</truncated>
|
||||
<difficult>0</difficult>
|
||||
<bndbox>
|
||||
<xmin>27</xmin>
|
||||
<ymin>17</ymin>
|
||||
<xmax>75</xmax>
|
||||
<ymax>69</ymax>
|
||||
</bndbox>
|
||||
</object>
|
||||
</annotation>
|
@ -1,28 +0,0 @@
|
||||
<?xml version="1.0" ?>
|
||||
<annotation>
|
||||
<folder>JPEGImages</folder>
|
||||
<filename>16329967796840222.jpg</filename>
|
||||
<path>E:\code\js_reverse\ayf_ocr\yolo_databases\VOC2012\JPEGImages\16329967796840222.jpg</path>
|
||||
<source>
|
||||
<database>Unknown</database>
|
||||
</source>
|
||||
<size>
|
||||
<width>150</width>
|
||||
<height>150</height>
|
||||
<depth>3</depth>
|
||||
</size>
|
||||
|
||||
<segmented>0</segmented>
|
||||
<object>
|
||||
<name>缺口</name>
|
||||
<pose>Unspecified</pose>
|
||||
<truncated>0</truncated>
|
||||
<difficult>0</difficult>
|
||||
<bndbox>
|
||||
<xmin>99</xmin>
|
||||
<ymin>57</ymin>
|
||||
<xmax>147</xmax>
|
||||
<ymax>109</ymax>
|
||||
</bndbox>
|
||||
</object>
|
||||
</annotation>
|
@ -1 +0,0 @@
|
||||
0 0.5133333333333334 0.7300000000000001 0.29333333333333333 0.32666666666666666
|
@ -1 +0,0 @@
|
||||
0 0.4866666666666667 0.44333333333333336 0.3066666666666667 0.34
|
@ -1 +0,0 @@
|
||||
0 0.6333333333333334 0.3766666666666667 0.3066666666666667 0.3666666666666667
|
@ -1 +0,0 @@
|
||||
0 0.51 0.7000000000000001 0.30000000000000004 0.32
|
@ -1 +0,0 @@
|
||||
0 0.33333333333333337 0.28 0.32 0.3466666666666667
|
@ -1 +0,0 @@
|
||||
0 0.8133333333333334 0.5466666666666667 0.32 0.3466666666666667
|
@ -1,5 +0,0 @@
|
||||
E:/code/js_reverse/ayf_ocr/yolo_databases/VOC2012/images/train/16329967796329784.jpg
|
||||
E:/code/js_reverse/ayf_ocr/yolo_databases/VOC2012/images/train/16329967796409855.jpg
|
||||
E:/code/js_reverse/ayf_ocr/yolo_databases/VOC2012/images/train/16329967796494925.jpg
|
||||
E:/code/js_reverse/ayf_ocr/yolo_databases/VOC2012/images/train/16329967796625037.jpg
|
||||
E:/code/js_reverse/ayf_ocr/yolo_databases/VOC2012/images/train/16329967796715117.jpg
|
@ -1 +0,0 @@
|
||||
E:/code/js_reverse/ayf_ocr/yolo_databases/VOC2012/images/val/16329967796840222.jpg
|
Before Width: | Height: | Size: 3.7 KiB After Width: | Height: | Size: 3.7 KiB |
BIN
ayf_ocr/yolo_databases/api/168477903867549.jpg
Normal file
After Width: | Height: | Size: 3.7 KiB |
226
ayf_ocr/yolo_databases/api/classify/predict.py
Normal file
@ -0,0 +1,226 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
|
||||
|
||||
Usage - sources:
|
||||
$ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam
|
||||
img.jpg # image
|
||||
vid.mp4 # video
|
||||
screen # screenshot
|
||||
path/ # directory
|
||||
list.txt # list of images
|
||||
list.streams # list of streams
|
||||
'path/*.jpg' # glob
|
||||
'https://youtu.be/Zgi9g1ksQHc' # YouTube
|
||||
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
|
||||
|
||||
Usage - formats:
|
||||
$ python classify/predict.py --weights yolov5s-cls.pt # PyTorch
|
||||
yolov5s-cls.torchscript # TorchScript
|
||||
yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn
|
||||
yolov5s-cls_openvino_model # OpenVINO
|
||||
yolov5s-cls.engine # TensorRT
|
||||
yolov5s-cls.mlmodel # CoreML (macOS-only)
|
||||
yolov5s-cls_saved_model # TensorFlow SavedModel
|
||||
yolov5s-cls.pb # TensorFlow GraphDef
|
||||
yolov5s-cls.tflite # TensorFlow Lite
|
||||
yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
|
||||
yolov5s-cls_paddle_model # PaddlePaddle
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
||||
|
||||
from models.common import DetectMultiBackend
|
||||
from utils.augmentations import classify_transforms
|
||||
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
|
||||
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
|
||||
increment_path, print_args, strip_optimizer)
|
||||
from utils.plots import Annotator
|
||||
from utils.torch_utils import select_device, smart_inference_mode
|
||||
|
||||
|
||||
@smart_inference_mode()
|
||||
def run(
|
||||
weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s)
|
||||
source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam)
|
||||
data=ROOT / 'data/coco128.yaml', # dataset.yaml path
|
||||
imgsz=(224, 224), # inference size (height, width)
|
||||
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
||||
view_img=False, # show results
|
||||
save_txt=False, # save results to *.txt
|
||||
nosave=False, # do not save images/videos
|
||||
augment=False, # augmented inference
|
||||
visualize=False, # visualize features
|
||||
update=False, # update all models
|
||||
project=ROOT / 'runs/predict-cls', # save results to project/name
|
||||
name='exp', # save results to project/name
|
||||
exist_ok=False, # existing project/name ok, do not increment
|
||||
half=False, # use FP16 half-precision inference
|
||||
dnn=False, # use OpenCV DNN for ONNX inference
|
||||
vid_stride=1, # video frame-rate stride
|
||||
):
|
||||
source = str(source)
|
||||
save_img = not nosave and not source.endswith('.txt') # save inference images
|
||||
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
|
||||
is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
|
||||
webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
|
||||
screenshot = source.lower().startswith('screen')
|
||||
if is_url and is_file:
|
||||
source = check_file(source) # download
|
||||
|
||||
# Directories
|
||||
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
|
||||
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
|
||||
|
||||
# Load model
|
||||
device = select_device(device)
|
||||
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
|
||||
stride, names, pt = model.stride, model.names, model.pt
|
||||
imgsz = check_img_size(imgsz, s=stride) # check image size
|
||||
|
||||
# Dataloader
|
||||
bs = 1 # batch_size
|
||||
if webcam:
|
||||
view_img = check_imshow(warn=True)
|
||||
dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
|
||||
bs = len(dataset)
|
||||
elif screenshot:
|
||||
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
|
||||
else:
|
||||
dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride)
|
||||
vid_path, vid_writer = [None] * bs, [None] * bs
|
||||
|
||||
# Run inference
|
||||
model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
|
||||
seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
|
||||
for path, im, im0s, vid_cap, s in dataset:
|
||||
with dt[0]:
|
||||
im = torch.Tensor(im).to(model.device)
|
||||
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
|
||||
if len(im.shape) == 3:
|
||||
im = im[None] # expand for batch dim
|
||||
|
||||
# Inference
|
||||
with dt[1]:
|
||||
results = model(im)
|
||||
|
||||
# Post-process
|
||||
with dt[2]:
|
||||
pred = F.softmax(results, dim=1) # probabilities
|
||||
|
||||
# Process predictions
|
||||
for i, prob in enumerate(pred): # per image
|
||||
seen += 1
|
||||
if webcam: # batch_size >= 1
|
||||
p, im0, frame = path[i], im0s[i].copy(), dataset.count
|
||||
s += f'{i}: '
|
||||
else:
|
||||
p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
|
||||
|
||||
p = Path(p) # to Path
|
||||
save_path = str(save_dir / p.name) # im.jpg
|
||||
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
|
||||
|
||||
s += '%gx%g ' % im.shape[2:] # print string
|
||||
annotator = Annotator(im0, example=str(names), pil=True)
|
||||
|
||||
# Print results
|
||||
top5i = prob.argsort(0, descending=True)[:5].tolist() # top 5 indices
|
||||
s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, "
|
||||
|
||||
# Write results
|
||||
text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i)
|
||||
if save_img or view_img: # Add bbox to image
|
||||
annotator.text((32, 32), text, txt_color=(255, 255, 255))
|
||||
if save_txt: # Write to file
|
||||
with open(f'{txt_path}.txt', 'a') as f:
|
||||
f.write(text + '\n')
|
||||
|
||||
# Stream results
|
||||
im0 = annotator.result()
|
||||
if view_img:
|
||||
if platform.system() == 'Linux' and p not in windows:
|
||||
windows.append(p)
|
||||
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
|
||||
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
|
||||
cv2.imshow(str(p), im0)
|
||||
cv2.waitKey(1) # 1 millisecond
|
||||
|
||||
# Save results (image with detections)
|
||||
if save_img:
|
||||
if dataset.mode == 'image':
|
||||
cv2.imwrite(save_path, im0)
|
||||
else: # 'video' or 'stream'
|
||||
if vid_path[i] != save_path: # new video
|
||||
vid_path[i] = save_path
|
||||
if isinstance(vid_writer[i], cv2.VideoWriter):
|
||||
vid_writer[i].release() # release previous video writer
|
||||
if vid_cap: # video
|
||||
fps = vid_cap.get(cv2.CAP_PROP_FPS)
|
||||
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||||
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||||
else: # stream
|
||||
fps, w, h = 30, im0.shape[1], im0.shape[0]
|
||||
save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
|
||||
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
|
||||
vid_writer[i].write(im0)
|
||||
|
||||
# Print time (inference-only)
|
||||
LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms')
|
||||
|
||||
# Print results
|
||||
t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
|
||||
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
|
||||
if save_txt or save_img:
|
||||
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
|
||||
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
|
||||
if update:
|
||||
strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
|
||||
|
||||
|
||||
def parse_opt():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)')
|
||||
parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
|
||||
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
|
||||
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w')
|
||||
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
||||
parser.add_argument('--view-img', action='store_true', help='show results')
|
||||
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
|
||||
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
|
||||
parser.add_argument('--augment', action='store_true', help='augmented inference')
|
||||
parser.add_argument('--visualize', action='store_true', help='visualize features')
|
||||
parser.add_argument('--update', action='store_true', help='update all models')
|
||||
parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name')
|
||||
parser.add_argument('--name', default='exp', help='save results to project/name')
|
||||
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
||||
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
|
||||
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
|
||||
parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
|
||||
opt = parser.parse_args()
|
||||
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
|
||||
print_args(vars(opt))
|
||||
return opt
|
||||
|
||||
|
||||
def main(opt):
|
||||
check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
|
||||
run(**vars(opt))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
opt = parse_opt()
|
||||
main(opt)
|
333
ayf_ocr/yolo_databases/api/classify/train.py
Normal file
@ -0,0 +1,333 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
Train a YOLOv5 classifier model on a classification dataset
|
||||
|
||||
Usage - Single-GPU training:
|
||||
$ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224
|
||||
|
||||
Usage - Multi-GPU DDP training:
|
||||
$ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
|
||||
|
||||
Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
|
||||
YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
|
||||
Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from copy import deepcopy
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import torch.hub as hub
|
||||
import torch.optim.lr_scheduler as lr_scheduler
|
||||
import torchvision
|
||||
from torch.cuda import amp
|
||||
from tqdm import tqdm
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
||||
|
||||
from classify import val as validate
|
||||
from models.experimental import attempt_load
|
||||
from models.yolo import ClassificationModel, DetectionModel
|
||||
from utils.dataloaders import create_classification_dataloader
|
||||
from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status,
|
||||
check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save)
|
||||
from utils.loggers import GenericLogger
|
||||
from utils.plots import imshow_cls
|
||||
from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP,
|
||||
smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first)
|
||||
|
||||
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
|
||||
RANK = int(os.getenv('RANK', -1))
|
||||
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
|
||||
GIT_INFO = check_git_info()
|
||||
|
||||
|
||||
def train(opt, device):
|
||||
init_seeds(opt.seed + 1 + RANK, deterministic=True)
|
||||
save_dir, data, bs, epochs, nw, imgsz, pretrained = \
|
||||
opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \
|
||||
opt.imgsz, str(opt.pretrained).lower() == 'true'
|
||||
cuda = device.type != 'cpu'
|
||||
|
||||
# Directories
|
||||
wdir = save_dir / 'weights'
|
||||
wdir.mkdir(parents=True, exist_ok=True) # make dir
|
||||
last, best = wdir / 'last.pt', wdir / 'best.pt'
|
||||
|
||||
# Save run settings
|
||||
yaml_save(save_dir / 'opt.yaml', vars(opt))
|
||||
|
||||
# Logger
|
||||
logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None
|
||||
|
||||
# Download Dataset
|
||||
with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
|
||||
data_dir = data if data.is_dir() else (DATASETS_DIR / data)
|
||||
if not data_dir.is_dir():
|
||||
LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...')
|
||||
t = time.time()
|
||||
if str(data) == 'imagenet':
|
||||
subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True)
|
||||
else:
|
||||
url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip'
|
||||
download(url, dir=data_dir.parent)
|
||||
s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n"
|
||||
LOGGER.info(s)
|
||||
|
||||
# Dataloaders
|
||||
nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes
|
||||
trainloader = create_classification_dataloader(path=data_dir / 'train',
|
||||
imgsz=imgsz,
|
||||
batch_size=bs // WORLD_SIZE,
|
||||
augment=True,
|
||||
cache=opt.cache,
|
||||
rank=LOCAL_RANK,
|
||||
workers=nw)
|
||||
|
||||
test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val' # data/test or data/val
|
||||
if RANK in {-1, 0}:
|
||||
testloader = create_classification_dataloader(path=test_dir,
|
||||
imgsz=imgsz,
|
||||
batch_size=bs // WORLD_SIZE * 2,
|
||||
augment=False,
|
||||
cache=opt.cache,
|
||||
rank=-1,
|
||||
workers=nw)
|
||||
|
||||
# Model
|
||||
with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT):
|
||||
if Path(opt.model).is_file() or opt.model.endswith('.pt'):
|
||||
model = attempt_load(opt.model, device='cpu', fuse=False)
|
||||
elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0
|
||||
model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None)
|
||||
else:
|
||||
m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models
|
||||
raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m))
|
||||
if isinstance(model, DetectionModel):
|
||||
LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'")
|
||||
model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model
|
||||
reshape_classifier_output(model, nc) # update class count
|
||||
for m in model.modules():
|
||||
if not pretrained and hasattr(m, 'reset_parameters'):
|
||||
m.reset_parameters()
|
||||
if isinstance(m, torch.nn.Dropout) and opt.dropout is not None:
|
||||
m.p = opt.dropout # set dropout
|
||||
for p in model.parameters():
|
||||
p.requires_grad = True # for training
|
||||
model = model.to(device)
|
||||
|
||||
# Info
|
||||
if RANK in {-1, 0}:
|
||||
model.names = trainloader.dataset.classes # attach class names
|
||||
model.transforms = testloader.dataset.torch_transforms # attach inference transforms
|
||||
model_info(model)
|
||||
if opt.verbose:
|
||||
LOGGER.info(model)
|
||||
images, labels = next(iter(trainloader))
|
||||
file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg')
|
||||
logger.log_images(file, name='Train Examples')
|
||||
logger.log_graph(model, imgsz) # log model
|
||||
|
||||
# Optimizer
|
||||
optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay)
|
||||
|
||||
# Scheduler
|
||||
lrf = 0.01 # final lr (fraction of lr0)
|
||||
# lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine
|
||||
lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear
|
||||
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
|
||||
# scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1,
|
||||
# final_div_factor=1 / 25 / lrf)
|
||||
|
||||
# EMA
|
||||
ema = ModelEMA(model) if RANK in {-1, 0} else None
|
||||
|
||||
# DDP mode
|
||||
if cuda and RANK != -1:
|
||||
model = smart_DDP(model)
|
||||
|
||||
# Train
|
||||
t0 = time.time()
|
||||
criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing) # loss function
|
||||
best_fitness = 0.0
|
||||
scaler = amp.GradScaler(enabled=cuda)
|
||||
val = test_dir.stem # 'val' or 'test'
|
||||
LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n'
|
||||
f'Using {nw * WORLD_SIZE} dataloader workers\n'
|
||||
f"Logging results to {colorstr('bold', save_dir)}\n"
|
||||
f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n'
|
||||
f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}")
|
||||
for epoch in range(epochs): # loop over the dataset multiple times
|
||||
tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness
|
||||
model.train()
|
||||
if RANK != -1:
|
||||
trainloader.sampler.set_epoch(epoch)
|
||||
pbar = enumerate(trainloader)
|
||||
if RANK in {-1, 0}:
|
||||
pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT)
|
||||
for i, (images, labels) in pbar: # progress bar
|
||||
images, labels = images.to(device, non_blocking=True), labels.to(device)
|
||||
|
||||
# Forward
|
||||
with amp.autocast(enabled=cuda): # stability issues when enabled
|
||||
loss = criterion(model(images), labels)
|
||||
|
||||
# Backward
|
||||
scaler.scale(loss).backward()
|
||||
|
||||
# Optimize
|
||||
scaler.unscale_(optimizer) # unscale gradients
|
||||
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
|
||||
scaler.step(optimizer)
|
||||
scaler.update()
|
||||
optimizer.zero_grad()
|
||||
if ema:
|
||||
ema.update(model)
|
||||
|
||||
if RANK in {-1, 0}:
|
||||
# Print
|
||||
tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses
|
||||
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
|
||||
pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36
|
||||
|
||||
# Test
|
||||
if i == len(pbar) - 1: # last batch
|
||||
top1, top5, vloss = validate.run(model=ema.ema,
|
||||
dataloader=testloader,
|
||||
criterion=criterion,
|
||||
pbar=pbar) # test accuracy, loss
|
||||
fitness = top1 # define fitness as top1 accuracy
|
||||
|
||||
# Scheduler
|
||||
scheduler.step()
|
||||
|
||||
# Log metrics
|
||||
if RANK in {-1, 0}:
|
||||
# Best fitness
|
||||
if fitness > best_fitness:
|
||||
best_fitness = fitness
|
||||
|
||||
# Log
|
||||
metrics = {
|
||||
'train/loss': tloss,
|
||||
f'{val}/loss': vloss,
|
||||
'metrics/accuracy_top1': top1,
|
||||
'metrics/accuracy_top5': top5,
|
||||
'lr/0': optimizer.param_groups[0]['lr']} # learning rate
|
||||
logger.log_metrics(metrics, epoch)
|
||||
|
||||
# Save model
|
||||
final_epoch = epoch + 1 == epochs
|
||||
if (not opt.nosave) or final_epoch:
|
||||
ckpt = {
|
||||
'epoch': epoch,
|
||||
'best_fitness': best_fitness,
|
||||
'model': deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(),
|
||||
'ema': None, # deepcopy(ema.ema).half(),
|
||||
'updates': ema.updates,
|
||||
'optimizer': None, # optimizer.state_dict(),
|
||||
'opt': vars(opt),
|
||||
'git': GIT_INFO, # {remote, branch, commit} if a git repo
|
||||
'date': datetime.now().isoformat()}
|
||||
|
||||
# Save last, best and delete
|
||||
torch.save(ckpt, last)
|
||||
if best_fitness == fitness:
|
||||
torch.save(ckpt, best)
|
||||
del ckpt
|
||||
|
||||
# Train complete
|
||||
if RANK in {-1, 0} and final_epoch:
|
||||
LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)'
|
||||
f"\nResults saved to {colorstr('bold', save_dir)}"
|
||||
f'\nPredict: python classify/predict.py --weights {best} --source im.jpg'
|
||||
f'\nValidate: python classify/val.py --weights {best} --data {data_dir}'
|
||||
f'\nExport: python export.py --weights {best} --include onnx'
|
||||
f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')"
|
||||
f'\nVisualize: https://netron.app\n')
|
||||
|
||||
# Plot examples
|
||||
images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels
|
||||
pred = torch.max(ema.ema(images.to(device)), 1)[1]
|
||||
file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg')
|
||||
|
||||
# Log results
|
||||
meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()}
|
||||
logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch)
|
||||
logger.log_model(best, epochs, metadata=meta)
|
||||
|
||||
|
||||
def parse_opt(known=False):
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path')
|
||||
parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...')
|
||||
parser.add_argument('--epochs', type=int, default=10, help='total training epochs')
|
||||
parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs')
|
||||
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)')
|
||||
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
|
||||
parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
|
||||
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
||||
parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
|
||||
parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name')
|
||||
parser.add_argument('--name', default='exp', help='save to project/name')
|
||||
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
||||
parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False')
|
||||
parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer')
|
||||
parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate')
|
||||
parser.add_argument('--decay', type=float, default=5e-5, help='weight decay')
|
||||
parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon')
|
||||
parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head')
|
||||
parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)')
|
||||
parser.add_argument('--verbose', action='store_true', help='Verbose mode')
|
||||
parser.add_argument('--seed', type=int, default=0, help='Global training seed')
|
||||
parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
|
||||
return parser.parse_known_args()[0] if known else parser.parse_args()
|
||||
|
||||
|
||||
def main(opt):
|
||||
# Checks
|
||||
if RANK in {-1, 0}:
|
||||
print_args(vars(opt))
|
||||
check_git_status()
|
||||
check_requirements(ROOT / 'requirements.txt')
|
||||
|
||||
# DDP mode
|
||||
device = select_device(opt.device, batch_size=opt.batch_size)
|
||||
if LOCAL_RANK != -1:
|
||||
assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size'
|
||||
assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
|
||||
assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
|
||||
torch.cuda.set_device(LOCAL_RANK)
|
||||
device = torch.device('cuda', LOCAL_RANK)
|
||||
dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')
|
||||
|
||||
# Parameters
|
||||
opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
|
||||
|
||||
# Train
|
||||
train(opt, device)
|
||||
|
||||
|
||||
def run(**kwargs):
|
||||
# Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m')
|
||||
opt = parse_opt(True)
|
||||
for k, v in kwargs.items():
|
||||
setattr(opt, k, v)
|
||||
main(opt)
|
||||
return opt
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
opt = parse_opt()
|
||||
main(opt)
|
1481
ayf_ocr/yolo_databases/api/classify/tutorial.ipynb
Normal file
170
ayf_ocr/yolo_databases/api/classify/val.py
Normal file
@ -0,0 +1,170 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
Validate a trained YOLOv5 classification model on a classification dataset
|
||||
|
||||
Usage:
|
||||
$ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images)
|
||||
$ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ImageNet
|
||||
|
||||
Usage - formats:
|
||||
$ python classify/val.py --weights yolov5s-cls.pt # PyTorch
|
||||
yolov5s-cls.torchscript # TorchScript
|
||||
yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn
|
||||
yolov5s-cls_openvino_model # OpenVINO
|
||||
yolov5s-cls.engine # TensorRT
|
||||
yolov5s-cls.mlmodel # CoreML (macOS-only)
|
||||
yolov5s-cls_saved_model # TensorFlow SavedModel
|
||||
yolov5s-cls.pb # TensorFlow GraphDef
|
||||
yolov5s-cls.tflite # TensorFlow Lite
|
||||
yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
|
||||
yolov5s-cls_paddle_model # PaddlePaddle
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
from tqdm import tqdm
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
||||
|
||||
from models.common import DetectMultiBackend
|
||||
from utils.dataloaders import create_classification_dataloader
|
||||
from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr,
|
||||
increment_path, print_args)
|
||||
from utils.torch_utils import select_device, smart_inference_mode
|
||||
|
||||
|
||||
@smart_inference_mode()
|
||||
def run(
|
||||
data=ROOT / '../datasets/mnist', # dataset dir
|
||||
weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s)
|
||||
batch_size=128, # batch size
|
||||
imgsz=224, # inference size (pixels)
|
||||
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
||||
workers=8, # max dataloader workers (per RANK in DDP mode)
|
||||
verbose=False, # verbose output
|
||||
project=ROOT / 'runs/val-cls', # save to project/name
|
||||
name='exp', # save to project/name
|
||||
exist_ok=False, # existing project/name ok, do not increment
|
||||
half=False, # use FP16 half-precision inference
|
||||
dnn=False, # use OpenCV DNN for ONNX inference
|
||||
model=None,
|
||||
dataloader=None,
|
||||
criterion=None,
|
||||
pbar=None,
|
||||
):
|
||||
# Initialize/load model and set device
|
||||
training = model is not None
|
||||
if training: # called by train.py
|
||||
device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
|
||||
half &= device.type != 'cpu' # half precision only supported on CUDA
|
||||
model.half() if half else model.float()
|
||||
else: # called directly
|
||||
device = select_device(device, batch_size=batch_size)
|
||||
|
||||
# Directories
|
||||
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
|
||||
save_dir.mkdir(parents=True, exist_ok=True) # make dir
|
||||
|
||||
# Load model
|
||||
model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
|
||||
stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
|
||||
imgsz = check_img_size(imgsz, s=stride) # check image size
|
||||
half = model.fp16 # FP16 supported on limited backends with CUDA
|
||||
if engine:
|
||||
batch_size = model.batch_size
|
||||
else:
|
||||
device = model.device
|
||||
if not (pt or jit):
|
||||
batch_size = 1 # export.py models default to batch-size 1
|
||||
LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
|
||||
|
||||
# Dataloader
|
||||
data = Path(data)
|
||||
test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val
|
||||
dataloader = create_classification_dataloader(path=test_dir,
|
||||
imgsz=imgsz,
|
||||
batch_size=batch_size,
|
||||
augment=False,
|
||||
rank=-1,
|
||||
workers=workers)
|
||||
|
||||
model.eval()
|
||||
pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile())
|
||||
n = len(dataloader) # number of batches
|
||||
action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing'
|
||||
desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}'
|
||||
bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
|
||||
with torch.cuda.amp.autocast(enabled=device.type != 'cpu'):
|
||||
for images, labels in bar:
|
||||
with dt[0]:
|
||||
images, labels = images.to(device, non_blocking=True), labels.to(device)
|
||||
|
||||
with dt[1]:
|
||||
y = model(images)
|
||||
|
||||
with dt[2]:
|
||||
pred.append(y.argsort(1, descending=True)[:, :5])
|
||||
targets.append(labels)
|
||||
if criterion:
|
||||
loss += criterion(y, labels)
|
||||
|
||||
loss /= n
|
||||
pred, targets = torch.cat(pred), torch.cat(targets)
|
||||
correct = (targets[:, None] == pred).float()
|
||||
acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy
|
||||
top1, top5 = acc.mean(0).tolist()
|
||||
|
||||
if pbar:
|
||||
pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}'
|
||||
if verbose: # all classes
|
||||
LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
|
||||
LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
|
||||
for i, c in model.names.items():
|
||||
acc_i = acc[targets == i]
|
||||
top1i, top5i = acc_i.mean(0).tolist()
|
||||
LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}')
|
||||
|
||||
# Print results
|
||||
t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image
|
||||
shape = (1, 3, imgsz, imgsz)
|
||||
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t)
|
||||
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
|
||||
|
||||
return top1, top5, loss
|
||||
|
||||
|
||||
def parse_opt():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path')
|
||||
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)')
|
||||
parser.add_argument('--batch-size', type=int, default=128, help='batch size')
|
||||
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)')
|
||||
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
||||
parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
|
||||
parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output')
|
||||
parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name')
|
||||
parser.add_argument('--name', default='exp', help='save to project/name')
|
||||
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
||||
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
|
||||
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
|
||||
opt = parser.parse_args()
|
||||
print_args(vars(opt))
|
||||
return opt
|
||||
|
||||
|
||||
def main(opt):
|
||||
check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
|
||||
run(**vars(opt))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
opt = parse_opt()
|
||||
main(opt)
|
74
ayf_ocr/yolo_databases/api/data/Argoverse.yaml
Normal file
@ -0,0 +1,74 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
|
||||
# Example usage: python train.py --data Argoverse.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── Argoverse ← downloads here (31.3 GB)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/Argoverse # dataset root dir
|
||||
train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
|
||||
val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
|
||||
test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: person
|
||||
1: bicycle
|
||||
2: car
|
||||
3: motorcycle
|
||||
4: bus
|
||||
5: truck
|
||||
6: traffic_light
|
||||
7: stop_sign
|
||||
|
||||
|
||||
# Download script/URL (optional) ---------------------------------------------------------------------------------------
|
||||
download: |
|
||||
import json
|
||||
|
||||
from tqdm import tqdm
|
||||
from utils.general import download, Path
|
||||
|
||||
|
||||
def argoverse2yolo(set):
|
||||
labels = {}
|
||||
a = json.load(open(set, "rb"))
|
||||
for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
|
||||
img_id = annot['image_id']
|
||||
img_name = a['images'][img_id]['name']
|
||||
img_label_name = f'{img_name[:-3]}txt'
|
||||
|
||||
cls = annot['category_id'] # instance class id
|
||||
x_center, y_center, width, height = annot['bbox']
|
||||
x_center = (x_center + width / 2) / 1920.0 # offset and scale
|
||||
y_center = (y_center + height / 2) / 1200.0 # offset and scale
|
||||
width /= 1920.0 # scale
|
||||
height /= 1200.0 # scale
|
||||
|
||||
img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
|
||||
if not img_dir.exists():
|
||||
img_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
k = str(img_dir / img_label_name)
|
||||
if k not in labels:
|
||||
labels[k] = []
|
||||
labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
|
||||
|
||||
for k in labels:
|
||||
with open(k, "w") as f:
|
||||
f.writelines(labels[k])
|
||||
|
||||
|
||||
# Download
|
||||
dir = Path(yaml['path']) # dataset root dir
|
||||
urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
|
||||
download(urls, dir=dir, delete=False)
|
||||
|
||||
# Convert
|
||||
annotations_dir = 'Argoverse-HD/annotations/'
|
||||
(dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
|
||||
for d in "train.json", "val.json":
|
||||
argoverse2yolo(dir / annotations_dir / d) # convert VisDrone annotations to YOLO labels
|
54
ayf_ocr/yolo_databases/api/data/GlobalWheat2020.yaml
Normal file
@ -0,0 +1,54 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
|
||||
# Example usage: python train.py --data GlobalWheat2020.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── GlobalWheat2020 ← downloads here (7.0 GB)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/GlobalWheat2020 # dataset root dir
|
||||
train: # train images (relative to 'path') 3422 images
|
||||
- images/arvalis_1
|
||||
- images/arvalis_2
|
||||
- images/arvalis_3
|
||||
- images/ethz_1
|
||||
- images/rres_1
|
||||
- images/inrae_1
|
||||
- images/usask_1
|
||||
val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1)
|
||||
- images/ethz_1
|
||||
test: # test images (optional) 1276 images
|
||||
- images/utokyo_1
|
||||
- images/utokyo_2
|
||||
- images/nau_1
|
||||
- images/uq_1
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: wheat_head
|
||||
|
||||
|
||||
# Download script/URL (optional) ---------------------------------------------------------------------------------------
|
||||
download: |
|
||||
from utils.general import download, Path
|
||||
|
||||
|
||||
# Download
|
||||
dir = Path(yaml['path']) # dataset root dir
|
||||
urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
|
||||
'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
|
||||
download(urls, dir=dir)
|
||||
|
||||
# Make Directories
|
||||
for p in 'annotations', 'images', 'labels':
|
||||
(dir / p).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Move
|
||||
for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
|
||||
'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
|
||||
(dir / p).rename(dir / 'images' / p) # move to /images
|
||||
f = (dir / p).with_suffix('.json') # json file
|
||||
if f.exists():
|
||||
f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
|
1022
ayf_ocr/yolo_databases/api/data/ImageNet.yaml
Normal file
438
ayf_ocr/yolo_databases/api/data/Objects365.yaml
Normal file
@ -0,0 +1,438 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Objects365 dataset https://www.objects365.org/ by Megvii
|
||||
# Example usage: python train.py --data Objects365.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── Objects365 ← downloads here (712 GB = 367G data + 345G zips)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/Objects365 # dataset root dir
|
||||
train: images/train # train images (relative to 'path') 1742289 images
|
||||
val: images/val # val images (relative to 'path') 80000 images
|
||||
test: # test images (optional)
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: Person
|
||||
1: Sneakers
|
||||
2: Chair
|
||||
3: Other Shoes
|
||||
4: Hat
|
||||
5: Car
|
||||
6: Lamp
|
||||
7: Glasses
|
||||
8: Bottle
|
||||
9: Desk
|
||||
10: Cup
|
||||
11: Street Lights
|
||||
12: Cabinet/shelf
|
||||
13: Handbag/Satchel
|
||||
14: Bracelet
|
||||
15: Plate
|
||||
16: Picture/Frame
|
||||
17: Helmet
|
||||
18: Book
|
||||
19: Gloves
|
||||
20: Storage box
|
||||
21: Boat
|
||||
22: Leather Shoes
|
||||
23: Flower
|
||||
24: Bench
|
||||
25: Potted Plant
|
||||
26: Bowl/Basin
|
||||
27: Flag
|
||||
28: Pillow
|
||||
29: Boots
|
||||
30: Vase
|
||||
31: Microphone
|
||||
32: Necklace
|
||||
33: Ring
|
||||
34: SUV
|
||||
35: Wine Glass
|
||||
36: Belt
|
||||
37: Monitor/TV
|
||||
38: Backpack
|
||||
39: Umbrella
|
||||
40: Traffic Light
|
||||
41: Speaker
|
||||
42: Watch
|
||||
43: Tie
|
||||
44: Trash bin Can
|
||||
45: Slippers
|
||||
46: Bicycle
|
||||
47: Stool
|
||||
48: Barrel/bucket
|
||||
49: Van
|
||||
50: Couch
|
||||
51: Sandals
|
||||
52: Basket
|
||||
53: Drum
|
||||
54: Pen/Pencil
|
||||
55: Bus
|
||||
56: Wild Bird
|
||||
57: High Heels
|
||||
58: Motorcycle
|
||||
59: Guitar
|
||||
60: Carpet
|
||||
61: Cell Phone
|
||||
62: Bread
|
||||
63: Camera
|
||||
64: Canned
|
||||
65: Truck
|
||||
66: Traffic cone
|
||||
67: Cymbal
|
||||
68: Lifesaver
|
||||
69: Towel
|
||||
70: Stuffed Toy
|
||||
71: Candle
|
||||
72: Sailboat
|
||||
73: Laptop
|
||||
74: Awning
|
||||
75: Bed
|
||||
76: Faucet
|
||||
77: Tent
|
||||
78: Horse
|
||||
79: Mirror
|
||||
80: Power outlet
|
||||
81: Sink
|
||||
82: Apple
|
||||
83: Air Conditioner
|
||||
84: Knife
|
||||
85: Hockey Stick
|
||||
86: Paddle
|
||||
87: Pickup Truck
|
||||
88: Fork
|
||||
89: Traffic Sign
|
||||
90: Balloon
|
||||
91: Tripod
|
||||
92: Dog
|
||||
93: Spoon
|
||||
94: Clock
|
||||
95: Pot
|
||||
96: Cow
|
||||
97: Cake
|
||||
98: Dinning Table
|
||||
99: Sheep
|
||||
100: Hanger
|
||||
101: Blackboard/Whiteboard
|
||||
102: Napkin
|
||||
103: Other Fish
|
||||
104: Orange/Tangerine
|
||||
105: Toiletry
|
||||
106: Keyboard
|
||||
107: Tomato
|
||||
108: Lantern
|
||||
109: Machinery Vehicle
|
||||
110: Fan
|
||||
111: Green Vegetables
|
||||
112: Banana
|
||||
113: Baseball Glove
|
||||
114: Airplane
|
||||
115: Mouse
|
||||
116: Train
|
||||
117: Pumpkin
|
||||
118: Soccer
|
||||
119: Skiboard
|
||||
120: Luggage
|
||||
121: Nightstand
|
||||
122: Tea pot
|
||||
123: Telephone
|
||||
124: Trolley
|
||||
125: Head Phone
|
||||
126: Sports Car
|
||||
127: Stop Sign
|
||||
128: Dessert
|
||||
129: Scooter
|
||||
130: Stroller
|
||||
131: Crane
|
||||
132: Remote
|
||||
133: Refrigerator
|
||||
134: Oven
|
||||
135: Lemon
|
||||
136: Duck
|
||||
137: Baseball Bat
|
||||
138: Surveillance Camera
|
||||
139: Cat
|
||||
140: Jug
|
||||
141: Broccoli
|
||||
142: Piano
|
||||
143: Pizza
|
||||
144: Elephant
|
||||
145: Skateboard
|
||||
146: Surfboard
|
||||
147: Gun
|
||||
148: Skating and Skiing shoes
|
||||
149: Gas stove
|
||||
150: Donut
|
||||
151: Bow Tie
|
||||
152: Carrot
|
||||
153: Toilet
|
||||
154: Kite
|
||||
155: Strawberry
|
||||
156: Other Balls
|
||||
157: Shovel
|
||||
158: Pepper
|
||||
159: Computer Box
|
||||
160: Toilet Paper
|
||||
161: Cleaning Products
|
||||
162: Chopsticks
|
||||
163: Microwave
|
||||
164: Pigeon
|
||||
165: Baseball
|
||||
166: Cutting/chopping Board
|
||||
167: Coffee Table
|
||||
168: Side Table
|
||||
169: Scissors
|
||||
170: Marker
|
||||
171: Pie
|
||||
172: Ladder
|
||||
173: Snowboard
|
||||
174: Cookies
|
||||
175: Radiator
|
||||
176: Fire Hydrant
|
||||
177: Basketball
|
||||
178: Zebra
|
||||
179: Grape
|
||||
180: Giraffe
|
||||
181: Potato
|
||||
182: Sausage
|
||||
183: Tricycle
|
||||
184: Violin
|
||||
185: Egg
|
||||
186: Fire Extinguisher
|
||||
187: Candy
|
||||
188: Fire Truck
|
||||
189: Billiards
|
||||
190: Converter
|
||||
191: Bathtub
|
||||
192: Wheelchair
|
||||
193: Golf Club
|
||||
194: Briefcase
|
||||
195: Cucumber
|
||||
196: Cigar/Cigarette
|
||||
197: Paint Brush
|
||||
198: Pear
|
||||
199: Heavy Truck
|
||||
200: Hamburger
|
||||
201: Extractor
|
||||
202: Extension Cord
|
||||
203: Tong
|
||||
204: Tennis Racket
|
||||
205: Folder
|
||||
206: American Football
|
||||
207: earphone
|
||||
208: Mask
|
||||
209: Kettle
|
||||
210: Tennis
|
||||
211: Ship
|
||||
212: Swing
|
||||
213: Coffee Machine
|
||||
214: Slide
|
||||
215: Carriage
|
||||
216: Onion
|
||||
217: Green beans
|
||||
218: Projector
|
||||
219: Frisbee
|
||||
220: Washing Machine/Drying Machine
|
||||
221: Chicken
|
||||
222: Printer
|
||||
223: Watermelon
|
||||
224: Saxophone
|
||||
225: Tissue
|
||||
226: Toothbrush
|
||||
227: Ice cream
|
||||
228: Hot-air balloon
|
||||
229: Cello
|
||||
230: French Fries
|
||||
231: Scale
|
||||
232: Trophy
|
||||
233: Cabbage
|
||||
234: Hot dog
|
||||
235: Blender
|
||||
236: Peach
|
||||
237: Rice
|
||||
238: Wallet/Purse
|
||||
239: Volleyball
|
||||
240: Deer
|
||||
241: Goose
|
||||
242: Tape
|
||||
243: Tablet
|
||||
244: Cosmetics
|
||||
245: Trumpet
|
||||
246: Pineapple
|
||||
247: Golf Ball
|
||||
248: Ambulance
|
||||
249: Parking meter
|
||||
250: Mango
|
||||
251: Key
|
||||
252: Hurdle
|
||||
253: Fishing Rod
|
||||
254: Medal
|
||||
255: Flute
|
||||
256: Brush
|
||||
257: Penguin
|
||||
258: Megaphone
|
||||
259: Corn
|
||||
260: Lettuce
|
||||
261: Garlic
|
||||
262: Swan
|
||||
263: Helicopter
|
||||
264: Green Onion
|
||||
265: Sandwich
|
||||
266: Nuts
|
||||
267: Speed Limit Sign
|
||||
268: Induction Cooker
|
||||
269: Broom
|
||||
270: Trombone
|
||||
271: Plum
|
||||
272: Rickshaw
|
||||
273: Goldfish
|
||||
274: Kiwi fruit
|
||||
275: Router/modem
|
||||
276: Poker Card
|
||||
277: Toaster
|
||||
278: Shrimp
|
||||
279: Sushi
|
||||
280: Cheese
|
||||
281: Notepaper
|
||||
282: Cherry
|
||||
283: Pliers
|
||||
284: CD
|
||||
285: Pasta
|
||||
286: Hammer
|
||||
287: Cue
|
||||
288: Avocado
|
||||
289: Hamimelon
|
||||
290: Flask
|
||||
291: Mushroom
|
||||
292: Screwdriver
|
||||
293: Soap
|
||||
294: Recorder
|
||||
295: Bear
|
||||
296: Eggplant
|
||||
297: Board Eraser
|
||||
298: Coconut
|
||||
299: Tape Measure/Ruler
|
||||
300: Pig
|
||||
301: Showerhead
|
||||
302: Globe
|
||||
303: Chips
|
||||
304: Steak
|
||||
305: Crosswalk Sign
|
||||
306: Stapler
|
||||
307: Camel
|
||||
308: Formula 1
|
||||
309: Pomegranate
|
||||
310: Dishwasher
|
||||
311: Crab
|
||||
312: Hoverboard
|
||||
313: Meat ball
|
||||
314: Rice Cooker
|
||||
315: Tuba
|
||||
316: Calculator
|
||||
317: Papaya
|
||||
318: Antelope
|
||||
319: Parrot
|
||||
320: Seal
|
||||
321: Butterfly
|
||||
322: Dumbbell
|
||||
323: Donkey
|
||||
324: Lion
|
||||
325: Urinal
|
||||
326: Dolphin
|
||||
327: Electric Drill
|
||||
328: Hair Dryer
|
||||
329: Egg tart
|
||||
330: Jellyfish
|
||||
331: Treadmill
|
||||
332: Lighter
|
||||
333: Grapefruit
|
||||
334: Game board
|
||||
335: Mop
|
||||
336: Radish
|
||||
337: Baozi
|
||||
338: Target
|
||||
339: French
|
||||
340: Spring Rolls
|
||||
341: Monkey
|
||||
342: Rabbit
|
||||
343: Pencil Case
|
||||
344: Yak
|
||||
345: Red Cabbage
|
||||
346: Binoculars
|
||||
347: Asparagus
|
||||
348: Barbell
|
||||
349: Scallop
|
||||
350: Noddles
|
||||
351: Comb
|
||||
352: Dumpling
|
||||
353: Oyster
|
||||
354: Table Tennis paddle
|
||||
355: Cosmetics Brush/Eyeliner Pencil
|
||||
356: Chainsaw
|
||||
357: Eraser
|
||||
358: Lobster
|
||||
359: Durian
|
||||
360: Okra
|
||||
361: Lipstick
|
||||
362: Cosmetics Mirror
|
||||
363: Curling
|
||||
364: Table Tennis
|
||||
|
||||
|
||||
# Download script/URL (optional) ---------------------------------------------------------------------------------------
|
||||
download: |
|
||||
from tqdm import tqdm
|
||||
|
||||
from utils.general import Path, check_requirements, download, np, xyxy2xywhn
|
||||
|
||||
check_requirements('pycocotools>=2.0')
|
||||
from pycocotools.coco import COCO
|
||||
|
||||
# Make Directories
|
||||
dir = Path(yaml['path']) # dataset root dir
|
||||
for p in 'images', 'labels':
|
||||
(dir / p).mkdir(parents=True, exist_ok=True)
|
||||
for q in 'train', 'val':
|
||||
(dir / p / q).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Train, Val Splits
|
||||
for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
|
||||
print(f"Processing {split} in {patches} patches ...")
|
||||
images, labels = dir / 'images' / split, dir / 'labels' / split
|
||||
|
||||
# Download
|
||||
url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
|
||||
if split == 'train':
|
||||
download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False) # annotations json
|
||||
download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8)
|
||||
elif split == 'val':
|
||||
download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json
|
||||
download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8)
|
||||
download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8)
|
||||
|
||||
# Move
|
||||
for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
|
||||
f.rename(images / f.name) # move to /images/{split}
|
||||
|
||||
# Labels
|
||||
coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
|
||||
names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
|
||||
for cid, cat in enumerate(names):
|
||||
catIds = coco.getCatIds(catNms=[cat])
|
||||
imgIds = coco.getImgIds(catIds=catIds)
|
||||
for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
|
||||
width, height = im["width"], im["height"]
|
||||
path = Path(im["file_name"]) # image filename
|
||||
try:
|
||||
with open(labels / path.with_suffix('.txt').name, 'a') as file:
|
||||
annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
|
||||
for a in coco.loadAnns(annIds):
|
||||
x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
|
||||
xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4)
|
||||
x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped
|
||||
file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
|
||||
except Exception as e:
|
||||
print(e)
|
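The loop above takes each COCO-style bbox (pixel x, y, width, height with a top-left origin), turns it into xyxy corners, and lets xyxy2xywhn normalise it into the centre-based format the YOLO label files use. A minimal sketch of that conversion, assuming the same semantics as the Ultralytics helper (the function name and the numbers below are illustrative only):

# Sketch of the xyxy -> normalized xywh conversion (assumed to mirror utils.general.xyxy2xywhn).
def xyxy_to_xywhn(box, img_w, img_h):
    x1, y1, x2, y2 = box
    x1, x2 = max(0.0, x1), min(float(img_w), x2)   # clip to the image, as clip=True does
    y1, y2 = max(0.0, y1), min(float(img_h), y2)
    xc = (x1 + x2) / 2 / img_w    # normalized box centre x
    yc = (y1 + y2) / 2 / img_h    # normalized box centre y
    w = (x2 - x1) / img_w         # normalized width
    h = (y2 - y1) / img_h         # normalized height
    return xc, yc, w, h

print(xyxy_to_xywhn((100, 50, 300, 250), img_w=640, img_h=480))
# (0.3125, 0.3125, 0.3125, 0.4166...)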
ayf_ocr/yolo_databases/api/data/SKU-110K.yaml
Normal file
@ -0,0 +1,53 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
|
||||
# Example usage: python train.py --data SKU-110K.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── SKU-110K ← downloads here (13.6 GB)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/SKU-110K # dataset root dir
|
||||
train: train.txt # train images (relative to 'path') 8219 images
|
||||
val: val.txt # val images (relative to 'path') 588 images
|
||||
test: test.txt # test images (optional) 2936 images
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: object
|
||||
|
||||
|
||||
# Download script/URL (optional) ---------------------------------------------------------------------------------------
|
||||
download: |
|
||||
import shutil
|
||||
from tqdm import tqdm
|
||||
from utils.general import np, pd, Path, download, xyxy2xywh
|
||||
|
||||
|
||||
# Download
|
||||
dir = Path(yaml['path']) # dataset root dir
|
||||
parent = Path(dir.parent) # download dir
|
||||
urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
|
||||
download(urls, dir=parent, delete=False)
|
||||
|
||||
# Rename directories
|
||||
if dir.exists():
|
||||
shutil.rmtree(dir)
|
||||
(parent / 'SKU110K_fixed').rename(dir) # rename dir
|
||||
(dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
|
||||
|
||||
# Convert labels
|
||||
names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
|
||||
for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
|
||||
x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
|
||||
images, unique_images = x[:, 0], np.unique(x[:, 0])
|
||||
with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
|
||||
f.writelines(f'./images/{s}\n' for s in unique_images)
|
||||
for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
|
||||
cls = 0 # single-class dataset
|
||||
with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
|
||||
for r in x[images == im]:
|
||||
w, h = r[6], r[7] # image width, height
|
||||
xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
|
||||
f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
|
ayf_ocr/yolo_databases/api/data/VOC.yaml
Normal file
@ -0,0 +1,100 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
|
||||
# Example usage: python train.py --data VOC.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── VOC ← downloads here (2.8 GB)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/VOC
|
||||
train: # train images (relative to 'path') 16551 images
|
||||
- images/train2012
|
||||
- images/train2007
|
||||
- images/val2012
|
||||
- images/val2007
|
||||
val: # val images (relative to 'path') 4952 images
|
||||
- images/test2007
|
||||
test: # test images (optional)
|
||||
- images/test2007
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: aeroplane
|
||||
1: bicycle
|
||||
2: bird
|
||||
3: boat
|
||||
4: bottle
|
||||
5: bus
|
||||
6: car
|
||||
7: cat
|
||||
8: chair
|
||||
9: cow
|
||||
10: diningtable
|
||||
11: dog
|
||||
12: horse
|
||||
13: motorbike
|
||||
14: person
|
||||
15: pottedplant
|
||||
16: sheep
|
||||
17: sofa
|
||||
18: train
|
||||
19: tvmonitor
|
||||
|
||||
|
||||
# Download script/URL (optional) ---------------------------------------------------------------------------------------
|
||||
download: |
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
from tqdm import tqdm
|
||||
from utils.general import download, Path
|
||||
|
||||
|
||||
def convert_label(path, lb_path, year, image_id):
|
||||
def convert_box(size, box):
|
||||
dw, dh = 1. / size[0], 1. / size[1]
|
||||
x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
|
||||
return x * dw, y * dh, w * dw, h * dh
|
||||
|
||||
in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
|
||||
out_file = open(lb_path, 'w')
|
||||
tree = ET.parse(in_file)
|
||||
root = tree.getroot()
|
||||
size = root.find('size')
|
||||
w = int(size.find('width').text)
|
||||
h = int(size.find('height').text)
|
||||
|
||||
names = list(yaml['names'].values()) # names list
|
||||
for obj in root.iter('object'):
|
||||
cls = obj.find('name').text
|
||||
if cls in names and int(obj.find('difficult').text) != 1:
|
||||
xmlbox = obj.find('bndbox')
|
||||
bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
|
||||
cls_id = names.index(cls) # class id
|
||||
out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
|
||||
|
||||
|
||||
# Download
|
||||
dir = Path(yaml['path']) # dataset root dir
|
||||
url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
|
||||
urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images
|
||||
f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images
|
||||
f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images
|
||||
download(urls, dir=dir / 'images', delete=False, curl=True, threads=3)
|
||||
|
||||
# Convert
|
||||
path = dir / 'images/VOCdevkit'
|
||||
for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
|
||||
imgs_path = dir / 'images' / f'{image_set}{year}'
|
||||
lbs_path = dir / 'labels' / f'{image_set}{year}'
|
||||
imgs_path.mkdir(exist_ok=True, parents=True)
|
||||
lbs_path.mkdir(exist_ok=True, parents=True)
|
||||
|
||||
with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
|
||||
image_ids = f.read().strip().split()
|
||||
for id in tqdm(image_ids, desc=f'{image_set}{year}'):
|
||||
f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path
|
||||
lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path
|
||||
f.rename(imgs_path / f.name) # move image
|
||||
convert_label(path, lb_path, year, id) # convert labels to YOLO format
|
ayf_ocr/yolo_databases/api/data/VisDrone.yaml
Normal file
@ -0,0 +1,70 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
|
||||
# Example usage: python train.py --data VisDrone.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── VisDrone ← downloads here (2.3 GB)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/VisDrone # dataset root dir
|
||||
train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
|
||||
val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
|
||||
test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: pedestrian
|
||||
1: people
|
||||
2: bicycle
|
||||
3: car
|
||||
4: van
|
||||
5: truck
|
||||
6: tricycle
|
||||
7: awning-tricycle
|
||||
8: bus
|
||||
9: motor
|
||||
|
||||
|
||||
# Download script/URL (optional) ---------------------------------------------------------------------------------------
|
||||
download: |
|
||||
from utils.general import download, os, Path
|
||||
|
||||
def visdrone2yolo(dir):
|
||||
from PIL import Image
|
||||
from tqdm import tqdm
|
||||
|
||||
def convert_box(size, box):
|
||||
# Convert VisDrone box to YOLO xywh box
|
||||
dw = 1. / size[0]
|
||||
dh = 1. / size[1]
|
||||
return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
|
||||
|
||||
(dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
|
||||
pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
|
||||
for f in pbar:
|
||||
img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
|
||||
lines = []
|
||||
with open(f, 'r') as file: # read annotation.txt
|
||||
for row in [x.split(',') for x in file.read().strip().splitlines()]:
|
||||
if row[4] == '0': # VisDrone 'ignored regions' class 0
|
||||
continue
|
||||
cls = int(row[5]) - 1
|
||||
box = convert_box(img_size, tuple(map(int, row[:4])))
|
||||
lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
|
||||
with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
|
||||
fl.writelines(lines) # write label.txt
|
||||
|
||||
|
||||
# Download
|
||||
dir = Path(yaml['path']) # dataset root dir
|
||||
urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
|
||||
'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
|
||||
'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
|
||||
'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
|
||||
download(urls, dir=dir, curl=True, threads=4)
|
||||
|
||||
# Convert
|
||||
for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
|
||||
visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
|
ayf_ocr/yolo_databases/api/data/coco.yaml
Normal file
@ -0,0 +1,116 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# COCO 2017 dataset http://cocodataset.org by Microsoft
|
||||
# Example usage: python train.py --data coco.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── coco ← downloads here (20.1 GB)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/coco # dataset root dir
|
||||
train: train2017.txt # train images (relative to 'path') 118287 images
|
||||
val: val2017.txt # val images (relative to 'path') 5000 images
|
||||
test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: person
|
||||
1: bicycle
|
||||
2: car
|
||||
3: motorcycle
|
||||
4: airplane
|
||||
5: bus
|
||||
6: train
|
||||
7: truck
|
||||
8: boat
|
||||
9: traffic light
|
||||
10: fire hydrant
|
||||
11: stop sign
|
||||
12: parking meter
|
||||
13: bench
|
||||
14: bird
|
||||
15: cat
|
||||
16: dog
|
||||
17: horse
|
||||
18: sheep
|
||||
19: cow
|
||||
20: elephant
|
||||
21: bear
|
||||
22: zebra
|
||||
23: giraffe
|
||||
24: backpack
|
||||
25: umbrella
|
||||
26: handbag
|
||||
27: tie
|
||||
28: suitcase
|
||||
29: frisbee
|
||||
30: skis
|
||||
31: snowboard
|
||||
32: sports ball
|
||||
33: kite
|
||||
34: baseball bat
|
||||
35: baseball glove
|
||||
36: skateboard
|
||||
37: surfboard
|
||||
38: tennis racket
|
||||
39: bottle
|
||||
40: wine glass
|
||||
41: cup
|
||||
42: fork
|
||||
43: knife
|
||||
44: spoon
|
||||
45: bowl
|
||||
46: banana
|
||||
47: apple
|
||||
48: sandwich
|
||||
49: orange
|
||||
50: broccoli
|
||||
51: carrot
|
||||
52: hot dog
|
||||
53: pizza
|
||||
54: donut
|
||||
55: cake
|
||||
56: chair
|
||||
57: couch
|
||||
58: potted plant
|
||||
59: bed
|
||||
60: dining table
|
||||
61: toilet
|
||||
62: tv
|
||||
63: laptop
|
||||
64: mouse
|
||||
65: remote
|
||||
66: keyboard
|
||||
67: cell phone
|
||||
68: microwave
|
||||
69: oven
|
||||
70: toaster
|
||||
71: sink
|
||||
72: refrigerator
|
||||
73: book
|
||||
74: clock
|
||||
75: vase
|
||||
76: scissors
|
||||
77: teddy bear
|
||||
78: hair drier
|
||||
79: toothbrush
|
||||
|
||||
|
||||
# Download script/URL (optional)
|
||||
download: |
|
||||
from utils.general import download, Path
|
||||
|
||||
|
||||
# Download labels
|
||||
segments = False # segment or box labels
|
||||
dir = Path(yaml['path']) # dataset root dir
|
||||
url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
|
||||
urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
|
||||
download(urls, dir=dir.parent)
|
||||
|
||||
# Download data
|
||||
urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
|
||||
'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
|
||||
'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
|
||||
download(urls, dir=dir / 'images', threads=3)
|
ayf_ocr/yolo_databases/api/data/coco128-seg.yaml
Normal file
@ -0,0 +1,101 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
|
||||
# Example usage: python train.py --data coco128.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── coco128-seg ← downloads here (7 MB)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/coco128-seg # dataset root dir
|
||||
train: images/train2017 # train images (relative to 'path') 128 images
|
||||
val: images/train2017 # val images (relative to 'path') 128 images
|
||||
test: # test images (optional)
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: person
|
||||
1: bicycle
|
||||
2: car
|
||||
3: motorcycle
|
||||
4: airplane
|
||||
5: bus
|
||||
6: train
|
||||
7: truck
|
||||
8: boat
|
||||
9: traffic light
|
||||
10: fire hydrant
|
||||
11: stop sign
|
||||
12: parking meter
|
||||
13: bench
|
||||
14: bird
|
||||
15: cat
|
||||
16: dog
|
||||
17: horse
|
||||
18: sheep
|
||||
19: cow
|
||||
20: elephant
|
||||
21: bear
|
||||
22: zebra
|
||||
23: giraffe
|
||||
24: backpack
|
||||
25: umbrella
|
||||
26: handbag
|
||||
27: tie
|
||||
28: suitcase
|
||||
29: frisbee
|
||||
30: skis
|
||||
31: snowboard
|
||||
32: sports ball
|
||||
33: kite
|
||||
34: baseball bat
|
||||
35: baseball glove
|
||||
36: skateboard
|
||||
37: surfboard
|
||||
38: tennis racket
|
||||
39: bottle
|
||||
40: wine glass
|
||||
41: cup
|
||||
42: fork
|
||||
43: knife
|
||||
44: spoon
|
||||
45: bowl
|
||||
46: banana
|
||||
47: apple
|
||||
48: sandwich
|
||||
49: orange
|
||||
50: broccoli
|
||||
51: carrot
|
||||
52: hot dog
|
||||
53: pizza
|
||||
54: donut
|
||||
55: cake
|
||||
56: chair
|
||||
57: couch
|
||||
58: potted plant
|
||||
59: bed
|
||||
60: dining table
|
||||
61: toilet
|
||||
62: tv
|
||||
63: laptop
|
||||
64: mouse
|
||||
65: remote
|
||||
66: keyboard
|
||||
67: cell phone
|
||||
68: microwave
|
||||
69: oven
|
||||
70: toaster
|
||||
71: sink
|
||||
72: refrigerator
|
||||
73: book
|
||||
74: clock
|
||||
75: vase
|
||||
76: scissors
|
||||
77: teddy bear
|
||||
78: hair drier
|
||||
79: toothbrush
|
||||
|
||||
|
||||
# Download script/URL (optional)
|
||||
download: https://ultralytics.com/assets/coco128-seg.zip
|
ayf_ocr/yolo_databases/api/data/coco128.yaml
Normal file
@ -0,0 +1,101 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
|
||||
# Example usage: python train.py --data coco128.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── coco128 ← downloads here (7 MB)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/coco128 # dataset root dir
|
||||
train: images/train2017 # train images (relative to 'path') 128 images
|
||||
val: images/train2017 # val images (relative to 'path') 128 images
|
||||
test: # test images (optional)
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: person
|
||||
1: bicycle
|
||||
2: car
|
||||
3: motorcycle
|
||||
4: airplane
|
||||
5: bus
|
||||
6: train
|
||||
7: truck
|
||||
8: boat
|
||||
9: traffic light
|
||||
10: fire hydrant
|
||||
11: stop sign
|
||||
12: parking meter
|
||||
13: bench
|
||||
14: bird
|
||||
15: cat
|
||||
16: dog
|
||||
17: horse
|
||||
18: sheep
|
||||
19: cow
|
||||
20: elephant
|
||||
21: bear
|
||||
22: zebra
|
||||
23: giraffe
|
||||
24: backpack
|
||||
25: umbrella
|
||||
26: handbag
|
||||
27: tie
|
||||
28: suitcase
|
||||
29: frisbee
|
||||
30: skis
|
||||
31: snowboard
|
||||
32: sports ball
|
||||
33: kite
|
||||
34: baseball bat
|
||||
35: baseball glove
|
||||
36: skateboard
|
||||
37: surfboard
|
||||
38: tennis racket
|
||||
39: bottle
|
||||
40: wine glass
|
||||
41: cup
|
||||
42: fork
|
||||
43: knife
|
||||
44: spoon
|
||||
45: bowl
|
||||
46: banana
|
||||
47: apple
|
||||
48: sandwich
|
||||
49: orange
|
||||
50: broccoli
|
||||
51: carrot
|
||||
52: hot dog
|
||||
53: pizza
|
||||
54: donut
|
||||
55: cake
|
||||
56: chair
|
||||
57: couch
|
||||
58: potted plant
|
||||
59: bed
|
||||
60: dining table
|
||||
61: toilet
|
||||
62: tv
|
||||
63: laptop
|
||||
64: mouse
|
||||
65: remote
|
||||
66: keyboard
|
||||
67: cell phone
|
||||
68: microwave
|
||||
69: oven
|
||||
70: toaster
|
||||
71: sink
|
||||
72: refrigerator
|
||||
73: book
|
||||
74: clock
|
||||
75: vase
|
||||
76: scissors
|
||||
77: teddy bear
|
||||
78: hair drier
|
||||
79: toothbrush
|
||||
|
||||
|
||||
# Download script/URL (optional)
|
||||
download: https://ultralytics.com/assets/coco128.zip
|
ayf_ocr/yolo_databases/api/data/hyps/hyp.Objects365.yaml
Normal file
@ -0,0 +1,34 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Hyperparameters for Objects365 training
|
||||
# python train.py --weights yolov5m.pt --data Objects365.yaml --evolve
|
||||
# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
|
||||
|
||||
lr0: 0.00258
|
||||
lrf: 0.17
|
||||
momentum: 0.779
|
||||
weight_decay: 0.00058
|
||||
warmup_epochs: 1.33
|
||||
warmup_momentum: 0.86
|
||||
warmup_bias_lr: 0.0711
|
||||
box: 0.0539
|
||||
cls: 0.299
|
||||
cls_pw: 0.825
|
||||
obj: 0.632
|
||||
obj_pw: 1.0
|
||||
iou_t: 0.2
|
||||
anchor_t: 3.44
|
||||
anchors: 3.2
|
||||
fl_gamma: 0.0
|
||||
hsv_h: 0.0188
|
||||
hsv_s: 0.704
|
||||
hsv_v: 0.36
|
||||
degrees: 0.0
|
||||
translate: 0.0902
|
||||
scale: 0.491
|
||||
shear: 0.0
|
||||
perspective: 0.0
|
||||
flipud: 0.0
|
||||
fliplr: 0.5
|
||||
mosaic: 1.0
|
||||
mixup: 0.0
|
||||
copy_paste: 0.0
|
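The hyperparameter files are plain YAML, so they are easy to inspect or tweak before a run. A quick check (a sketch, assuming PyYAML is available and the working directory is the api folder that contains data/hyps/):

# Sanity-check a hyperparameter file before training (sketch, assumes PyYAML).
import yaml

with open('data/hyps/hyp.Objects365.yaml', encoding='utf-8') as f:
    hyp = yaml.safe_load(f)

print(hyp['lr0'], hyp['lrf'], hyp['mosaic'])   # 0.00258 0.17 1.0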
ayf_ocr/yolo_databases/api/data/hyps/hyp.VOC.yaml
Normal file
@ -0,0 +1,40 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Hyperparameters for VOC training
|
||||
# python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve
|
||||
# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials
|
||||
|
||||
# YOLOv5 Hyperparameter Evolution Results
|
||||
# Best generation: 467
|
||||
# Last generation: 996
|
||||
# metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss
|
||||
# 0.87729, 0.85125, 0.91286, 0.72664, 0.0076739, 0.0042529, 0.0013865
|
||||
|
||||
lr0: 0.00334
|
||||
lrf: 0.15135
|
||||
momentum: 0.74832
|
||||
weight_decay: 0.00025
|
||||
warmup_epochs: 3.3835
|
||||
warmup_momentum: 0.59462
|
||||
warmup_bias_lr: 0.18657
|
||||
box: 0.02
|
||||
cls: 0.21638
|
||||
cls_pw: 0.5
|
||||
obj: 0.51728
|
||||
obj_pw: 0.67198
|
||||
iou_t: 0.2
|
||||
anchor_t: 3.3744
|
||||
fl_gamma: 0.0
|
||||
hsv_h: 0.01041
|
||||
hsv_s: 0.54703
|
||||
hsv_v: 0.27739
|
||||
degrees: 0.0
|
||||
translate: 0.04591
|
||||
scale: 0.75544
|
||||
shear: 0.0
|
||||
perspective: 0.0
|
||||
flipud: 0.0
|
||||
fliplr: 0.5
|
||||
mosaic: 0.85834
|
||||
mixup: 0.04266
|
||||
copy_paste: 0.0
|
||||
anchors: 3.412
|
ayf_ocr/yolo_databases/api/data/hyps/hyp.no-augmentation.yaml
Normal file
@ -0,0 +1,35 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Hyperparameters when using the Albumentations framework
|
||||
# python train.py --hyp hyp.no-augmentation.yaml
|
||||
# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples
|
||||
|
||||
lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
|
||||
lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
|
||||
momentum: 0.937 # SGD momentum/Adam beta1
|
||||
weight_decay: 0.0005 # optimizer weight decay 5e-4
|
||||
warmup_epochs: 3.0 # warmup epochs (fractions ok)
|
||||
warmup_momentum: 0.8 # warmup initial momentum
|
||||
warmup_bias_lr: 0.1 # warmup initial bias lr
|
||||
box: 0.05 # box loss gain
|
||||
cls: 0.3 # cls loss gain
|
||||
cls_pw: 1.0 # cls BCELoss positive_weight
|
||||
obj: 0.7 # obj loss gain (scale with pixels)
|
||||
obj_pw: 1.0 # obj BCELoss positive_weight
|
||||
iou_t: 0.20 # IoU training threshold
|
||||
anchor_t: 4.0 # anchor-multiple threshold
|
||||
# anchors: 3 # anchors per output layer (0 to ignore)
|
||||
# these parameters are all zero since we want to use the Albumentations framework
|
||||
fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
|
||||
hsv_h: 0 # image HSV-Hue augmentation (fraction)
|
||||
hsv_s: 0 # image HSV-Saturation augmentation (fraction)
|
||||
hsv_v: 0 # image HSV-Value augmentation (fraction)
|
||||
degrees: 0.0 # image rotation (+/- deg)
|
||||
translate: 0 # image translation (+/- fraction)
|
||||
scale: 0 # image scale (+/- gain)
|
||||
shear: 0 # image shear (+/- deg)
|
||||
perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
|
||||
flipud: 0.0 # image flip up-down (probability)
|
||||
fliplr: 0.0 # image flip left-right (probability)
|
||||
mosaic: 0.0 # image mosaic (probability)
|
||||
mixup: 0.0 # image mixup (probability)
|
||||
copy_paste: 0.0 # segment copy-paste (probability)
|
ayf_ocr/yolo_databases/api/data/hyps/hyp.scratch-high.yaml
Normal file
@ -0,0 +1,34 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Hyperparameters for high-augmentation COCO training from scratch
|
||||
# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
|
||||
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
|
||||
|
||||
lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
|
||||
lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
|
||||
momentum: 0.937 # SGD momentum/Adam beta1
|
||||
weight_decay: 0.0005 # optimizer weight decay 5e-4
|
||||
warmup_epochs: 3.0 # warmup epochs (fractions ok)
|
||||
warmup_momentum: 0.8 # warmup initial momentum
|
||||
warmup_bias_lr: 0.1 # warmup initial bias lr
|
||||
box: 0.05 # box loss gain
|
||||
cls: 0.3 # cls loss gain
|
||||
cls_pw: 1.0 # cls BCELoss positive_weight
|
||||
obj: 0.7 # obj loss gain (scale with pixels)
|
||||
obj_pw: 1.0 # obj BCELoss positive_weight
|
||||
iou_t: 0.20 # IoU training threshold
|
||||
anchor_t: 4.0 # anchor-multiple threshold
|
||||
# anchors: 3 # anchors per output layer (0 to ignore)
|
||||
fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
|
||||
hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
|
||||
hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
|
||||
hsv_v: 0.4 # image HSV-Value augmentation (fraction)
|
||||
degrees: 0.0 # image rotation (+/- deg)
|
||||
translate: 0.1 # image translation (+/- fraction)
|
||||
scale: 0.9 # image scale (+/- gain)
|
||||
shear: 0.0 # image shear (+/- deg)
|
||||
perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
|
||||
flipud: 0.0 # image flip up-down (probability)
|
||||
fliplr: 0.5 # image flip left-right (probability)
|
||||
mosaic: 1.0 # image mosaic (probability)
|
||||
mixup: 0.1 # image mixup (probability)
|
||||
copy_paste: 0.1 # segment copy-paste (probability)
|
ayf_ocr/yolo_databases/api/data/hyps/hyp.scratch-low.yaml
Normal file
@ -0,0 +1,34 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Hyperparameters for low-augmentation COCO training from scratch
|
||||
# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear
|
||||
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
|
||||
|
||||
lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
|
||||
lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf)
|
||||
momentum: 0.937 # SGD momentum/Adam beta1
|
||||
weight_decay: 0.0005 # optimizer weight decay 5e-4
|
||||
warmup_epochs: 3.0 # warmup epochs (fractions ok)
|
||||
warmup_momentum: 0.8 # warmup initial momentum
|
||||
warmup_bias_lr: 0.1 # warmup initial bias lr
|
||||
box: 0.05 # box loss gain
|
||||
cls: 0.5 # cls loss gain
|
||||
cls_pw: 1.0 # cls BCELoss positive_weight
|
||||
obj: 1.0 # obj loss gain (scale with pixels)
|
||||
obj_pw: 1.0 # obj BCELoss positive_weight
|
||||
iou_t: 0.20 # IoU training threshold
|
||||
anchor_t: 4.0 # anchor-multiple threshold
|
||||
# anchors: 3 # anchors per output layer (0 to ignore)
|
||||
fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
|
||||
hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
|
||||
hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
|
||||
hsv_v: 0.4 # image HSV-Value augmentation (fraction)
|
||||
degrees: 0.0 # image rotation (+/- deg)
|
||||
translate: 0.1 # image translation (+/- fraction)
|
||||
scale: 0.5 # image scale (+/- gain)
|
||||
shear: 0.0 # image shear (+/- deg)
|
||||
perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
|
||||
flipud: 0.0 # image flip up-down (probability)
|
||||
fliplr: 0.5 # image flip left-right (probability)
|
||||
mosaic: 1.0 # image mosaic (probability)
|
||||
mixup: 0.0 # image mixup (probability)
|
||||
copy_paste: 0.0 # segment copy-paste (probability)
|
ayf_ocr/yolo_databases/api/data/hyps/hyp.scratch-med.yaml
Normal file
@ -0,0 +1,34 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Hyperparameters for medium-augmentation COCO training from scratch
|
||||
# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300
|
||||
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
|
||||
|
||||
lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
|
||||
lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf)
|
||||
momentum: 0.937 # SGD momentum/Adam beta1
|
||||
weight_decay: 0.0005 # optimizer weight decay 5e-4
|
||||
warmup_epochs: 3.0 # warmup epochs (fractions ok)
|
||||
warmup_momentum: 0.8 # warmup initial momentum
|
||||
warmup_bias_lr: 0.1 # warmup initial bias lr
|
||||
box: 0.05 # box loss gain
|
||||
cls: 0.3 # cls loss gain
|
||||
cls_pw: 1.0 # cls BCELoss positive_weight
|
||||
obj: 0.7 # obj loss gain (scale with pixels)
|
||||
obj_pw: 1.0 # obj BCELoss positive_weight
|
||||
iou_t: 0.20 # IoU training threshold
|
||||
anchor_t: 4.0 # anchor-multiple threshold
|
||||
# anchors: 3 # anchors per output layer (0 to ignore)
|
||||
fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
|
||||
hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
|
||||
hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
|
||||
hsv_v: 0.4 # image HSV-Value augmentation (fraction)
|
||||
degrees: 0.0 # image rotation (+/- deg)
|
||||
translate: 0.1 # image translation (+/- fraction)
|
||||
scale: 0.9 # image scale (+/- gain)
|
||||
shear: 0.0 # image shear (+/- deg)
|
||||
perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
|
||||
flipud: 0.0 # image flip up-down (probability)
|
||||
fliplr: 0.5 # image flip left-right (probability)
|
||||
mosaic: 1.0 # image mosaic (probability)
|
||||
mixup: 0.1 # image mixup (probability)
|
||||
copy_paste: 0.0 # segment copy-paste (probability)
|
ayf_ocr/yolo_databases/api/data/my.yaml
Normal file
@ -0,0 +1,7 @@
train: E:\code\js_reverse\ayf_ocr\yolo_databases\slide\Annotations\train.txt
val: E:\code\js_reverse\ayf_ocr\yolo_databases\slide\Annotations\val.txt


# Classes
names:
  0: 0
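my.yaml points train/val at the absolute Windows paths used on the course machine; adjust them to your own layout. With it in place, training on the single-class slide dataset would look roughly like the usage comments in the other data yamls (the flags are the standard train.py ones; the exact values are up to you):

python train.py --data data/my.yaml --weights yolov5s.pt --epochs 100 --batch 16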
ayf_ocr/yolo_databases/api/data/scripts/download_weights.sh
Normal file
@ -0,0 +1,22 @@
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
# Download latest models from https://github.com/ultralytics/yolov5/releases
# Example usage: bash data/scripts/download_weights.sh
# parent
# └── yolov5
#     ├── yolov5s.pt ← downloads here
#     ├── yolov5m.pt
#     └── ...

python - <<EOF
from utils.downloads import attempt_download

p5 = list('nsmlx')  # P5 models
p6 = [f'{x}6' for x in p5]  # P6 models
cls = [f'{x}-cls' for x in p5]  # classification models
seg = [f'{x}-seg' for x in p5]  # segmentation models

for x in p5 + p6 + cls + seg:
    attempt_download(f'weights/yolov5{x}.pt')

EOF
ayf_ocr/yolo_databases/api/data/scripts/get_coco.sh
Normal file
@ -0,0 +1,56 @@
|
||||
#!/bin/bash
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Download COCO 2017 dataset http://cocodataset.org
|
||||
# Example usage: bash data/scripts/get_coco.sh
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── coco ← downloads here
|
||||
|
||||
# Arguments (optional) Usage: bash data/scripts/get_coco.sh --train --val --test --segments
|
||||
if [ "$#" -gt 0 ]; then
|
||||
for opt in "$@"; do
|
||||
case "${opt}" in
|
||||
--train) train=true ;;
|
||||
--val) val=true ;;
|
||||
--test) test=true ;;
|
||||
--segments) segments=true ;;
|
||||
esac
|
||||
done
|
||||
else
|
||||
train=true
|
||||
val=true
|
||||
test=false
|
||||
segments=false
|
||||
fi
|
||||
|
||||
# Download/unzip labels
|
||||
d='../datasets' # unzip directory
|
||||
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
|
||||
if [ "$segments" == "true" ]; then
|
||||
f='coco2017labels-segments.zip' # 168 MB
|
||||
else
|
||||
f='coco2017labels.zip' # 46 MB
|
||||
fi
|
||||
echo 'Downloading' $url$f ' ...'
|
||||
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
|
||||
|
||||
# Download/unzip images
|
||||
d='../datasets/coco/images' # unzip directory
|
||||
url=http://images.cocodataset.org/zips/
|
||||
if [ "$train" == "true" ]; then
|
||||
f='train2017.zip' # 19G, 118k images
|
||||
echo 'Downloading' $url$f '...'
|
||||
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
|
||||
fi
|
||||
if [ "$val" == "true" ]; then
|
||||
f='val2017.zip' # 1G, 5k images
|
||||
echo 'Downloading' $url$f '...'
|
||||
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
|
||||
fi
|
||||
if [ "$test" == "true" ]; then
|
||||
f='test2017.zip' # 7G, 41k images (optional)
|
||||
echo 'Downloading' $url$f '...'
|
||||
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
|
||||
fi
|
||||
wait # finish background tasks
|
ayf_ocr/yolo_databases/api/data/scripts/get_coco128.sh
Normal file
@ -0,0 +1,17 @@
#!/bin/bash
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
# Example usage: bash data/scripts/get_coco128.sh
# parent
# ├── yolov5
# └── datasets
#     └── coco128 ← downloads here

# Download/unzip images and labels
d='../datasets' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco128.zip' # or 'coco128-segments.zip', 68 MB
echo 'Downloading' $url$f ' ...'
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &

wait # finish background tasks
ayf_ocr/yolo_databases/api/data/scripts/get_imagenet.sh
Normal file
@ -0,0 +1,51 @@
|
||||
#!/bin/bash
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# Download ILSVRC2012 ImageNet dataset https://image-net.org
|
||||
# Example usage: bash data/scripts/get_imagenet.sh
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── imagenet ← downloads here
|
||||
|
||||
# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val
|
||||
if [ "$#" -gt 0 ]; then
|
||||
for opt in "$@"; do
|
||||
case "${opt}" in
|
||||
--train) train=true ;;
|
||||
--val) val=true ;;
|
||||
esac
|
||||
done
|
||||
else
|
||||
train=true
|
||||
val=true
|
||||
fi
|
||||
|
||||
# Make dir
|
||||
d='../datasets/imagenet' # unzip directory
|
||||
mkdir -p $d && cd $d
|
||||
|
||||
# Download/unzip train
|
||||
if [ "$train" == "true" ]; then
|
||||
wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_train.tar # download 138G, 1281167 images
|
||||
mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
|
||||
tar -xf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar
|
||||
find . -name "*.tar" | while read NAME; do
|
||||
mkdir -p "${NAME%.tar}"
|
||||
tar -xf "${NAME}" -C "${NAME%.tar}"
|
||||
rm -f "${NAME}"
|
||||
done
|
||||
cd ..
|
||||
fi
|
||||
|
||||
# Download/unzip val
|
||||
if [ "$val" == "true" ]; then
|
||||
wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar # download 6.3G, 50000 images
|
||||
mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xf ILSVRC2012_img_val.tar
|
||||
wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash # move into subdirs
|
||||
fi
|
||||
|
||||
# Delete corrupted image (optional: PNG under JPEG name that may cause dataloaders to fail)
|
||||
# rm train/n04266014/n04266014_10835.JPEG
|
||||
|
||||
# TFRecords (optional)
|
||||
# wget https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_lsvrc_2015_synsets.txt
|
ayf_ocr/yolo_databases/api/data/xView.yaml
Normal file
@ -0,0 +1,153 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
|
||||
# -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! --------
|
||||
# Example usage: python train.py --data xView.yaml
|
||||
# parent
|
||||
# ├── yolov5
|
||||
# └── datasets
|
||||
# └── xView ← downloads here (20.7 GB)
|
||||
|
||||
|
||||
# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
|
||||
path: ../datasets/xView # dataset root dir
|
||||
train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
|
||||
val: images/autosplit_val.txt # val images (relative to 'path') 10% of 847 train images
|
||||
|
||||
# Classes
|
||||
names:
|
||||
0: Fixed-wing Aircraft
|
||||
1: Small Aircraft
|
||||
2: Cargo Plane
|
||||
3: Helicopter
|
||||
4: Passenger Vehicle
|
||||
5: Small Car
|
||||
6: Bus
|
||||
7: Pickup Truck
|
||||
8: Utility Truck
|
||||
9: Truck
|
||||
10: Cargo Truck
|
||||
11: Truck w/Box
|
||||
12: Truck Tractor
|
||||
13: Trailer
|
||||
14: Truck w/Flatbed
|
||||
15: Truck w/Liquid
|
||||
16: Crane Truck
|
||||
17: Railway Vehicle
|
||||
18: Passenger Car
|
||||
19: Cargo Car
|
||||
20: Flat Car
|
||||
21: Tank car
|
||||
22: Locomotive
|
||||
23: Maritime Vessel
|
||||
24: Motorboat
|
||||
25: Sailboat
|
||||
26: Tugboat
|
||||
27: Barge
|
||||
28: Fishing Vessel
|
||||
29: Ferry
|
||||
30: Yacht
|
||||
31: Container Ship
|
||||
32: Oil Tanker
|
||||
33: Engineering Vehicle
|
||||
34: Tower crane
|
||||
35: Container Crane
|
||||
36: Reach Stacker
|
||||
37: Straddle Carrier
|
||||
38: Mobile Crane
|
||||
39: Dump Truck
|
||||
40: Haul Truck
|
||||
41: Scraper/Tractor
|
||||
42: Front loader/Bulldozer
|
||||
43: Excavator
|
||||
44: Cement Mixer
|
||||
45: Ground Grader
|
||||
46: Hut/Tent
|
||||
47: Shed
|
||||
48: Building
|
||||
49: Aircraft Hangar
|
||||
50: Damaged Building
|
||||
51: Facility
|
||||
52: Construction Site
|
||||
53: Vehicle Lot
|
||||
54: Helipad
|
||||
55: Storage Tank
|
||||
56: Shipping container lot
|
||||
57: Shipping Container
|
||||
58: Pylon
|
||||
59: Tower
|
||||
|
||||
|
||||
# Download script/URL (optional) ---------------------------------------------------------------------------------------
|
||||
download: |
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
from tqdm import tqdm
|
||||
|
||||
from utils.dataloaders import autosplit
|
||||
from utils.general import download, xyxy2xywhn
|
||||
|
||||
|
||||
def convert_labels(fname=Path('xView/xView_train.geojson')):
|
||||
# Convert xView geoJSON labels to YOLO format
|
||||
path = fname.parent
|
||||
with open(fname) as f:
|
||||
print(f'Loading {fname}...')
|
||||
data = json.load(f)
|
||||
|
||||
# Make dirs
|
||||
labels = Path(path / 'labels' / 'train')
|
||||
os.system(f'rm -rf {labels}')
|
||||
labels.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# xView classes 11-94 to 0-59
|
||||
xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
|
||||
12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
|
||||
29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
|
||||
47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]
|
||||
|
||||
shapes = {}
|
||||
for feature in tqdm(data['features'], desc=f'Converting {fname}'):
|
||||
p = feature['properties']
|
||||
if p['bounds_imcoords']:
|
||||
id = p['image_id']
|
||||
file = path / 'train_images' / id
|
||||
if file.exists(): # 1395.tif missing
|
||||
try:
|
||||
box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
|
||||
assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
|
||||
cls = p['type_id']
|
||||
cls = xview_class2index[int(cls)] # xView class to 0-60
|
||||
assert 59 >= cls >= 0, f'incorrect class index {cls}'
|
||||
|
||||
# Write YOLO label
|
||||
if id not in shapes:
|
||||
shapes[id] = Image.open(file).size
|
||||
box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True)
|
||||
with open((labels / id).with_suffix('.txt'), 'a') as f:
|
||||
f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
|
||||
except Exception as e:
|
||||
print(f'WARNING: skipping one label for {file}: {e}')
|
||||
|
||||
|
||||
# Download manually from https://challenge.xviewdataset.org
|
||||
dir = Path(yaml['path']) # dataset root dir
|
||||
# urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels
|
||||
# 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images
|
||||
# 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels)
|
||||
# download(urls, dir=dir, delete=False)
|
||||
|
||||
# Convert labels
|
||||
convert_labels(dir / 'xView_train.geojson')
|
||||
|
||||
# Move images
|
||||
images = Path(dir / 'images')
|
||||
images.mkdir(parents=True, exist_ok=True)
|
||||
Path(dir / 'train_images').rename(dir / 'images' / 'train')
|
||||
Path(dir / 'val_images').rename(dir / 'images' / 'val')
|
||||
|
||||
# Split
|
||||
autosplit(dir / 'images' / 'train')
|
ayf_ocr/yolo_databases/api/detect.py
Normal file
@ -0,0 +1,106 @@
import torch
from models.common import DetectMultiBackend
from utils.general import (check_img_size, cv2, non_max_suppression, scale_boxes)
from utils.torch_utils import select_device
from utils.augmentations import letterbox
import numpy as np


class YOLOv5(object):
    # Default parameter settings
    _defaults = {
        "weights": "./best.pt",
        "source": "data/images/val",
        "data": "data/coco128.yaml",
        "imgsz": (150, 150),
        "conf_thres": 0.25,
        "iou_thres": 0.45,
        "max_det": 1000,
        "device": "",
        "view_img": False,
        "save_txt": False,
        "save_conf": False,
        "save_crop": False,
        "nosave": False,
        "classes": None,
        "agnostic_nms": False,
        "augment": False,
        "visualize": False,
        "update": False,
        "project": "runs/detect",
        "name": 'exp',
        "exist_ok": False,
        "line_thickness": 3,
        "hide_labels": False,
        "hide_conf": False,
        "half": False,
        "dnn": False,
        "vid_stride": 1
    }

    # Initialisation: load the model
    def __init__(self, device='0', **kwargs):
        self.__dict__.update(self._defaults)
        self.device = select_device(device)
        self.half = self.device.type != 'cpu'  # use FP16 only when running on GPU
        self.model = DetectMultiBackend(weights=self.weights, device=self.device, dnn=self.dnn, data=self.data,
                                        fp16=self.half)
        self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
        self.imgsz = check_img_size(self.imgsz, s=self.stride)

    # Inference
    def infer(self, inImg):
        # Use letterbox to resize the image to the 150x150 input size
        img = letterbox(inImg, new_shape=self.imgsz)[0]

        # Normalisation and tensor conversion
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
        img = np.ascontiguousarray(img)  # make the array memory-contiguous
        img = torch.from_numpy(img).to(self.device)
        img = img.half() if self.half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Forward pass
        pred = self.model(img, augment=True)[0]
        # NMS
        pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=True)

        bbox_xyxy = []
        confs = []
        cls_ids = []

        # Parse the detections
        for i, det in enumerate(pred):
            if det is not None and len(det):
                # Map the boxes back to the original image size
                det[:, :4] = scale_boxes(img.shape[2:], det[:, :4], inImg.shape).round()
                # Collect the results
                for *xyxy, conf, cls in reversed(det):
                    bbox_xyxy.append(xyxy)
                    confs.append(conf.item())
                    cls_ids.append(int(cls.item()))

        xyxys = torch.Tensor(bbox_xyxy)
        confss = torch.Tensor(confs)
        cls_ids = torch.Tensor(cls_ids)
        return xyxys, confss, cls_ids


if __name__ == '__main__':
    with open('./16329967796715117.jpg', 'rb') as f:
        input_image = f.read()
    imBytes = np.frombuffer(input_image, np.uint8)
    iImage = cv2.imdecode(imBytes, cv2.IMREAD_COLOR)
    yolov5_c = YOLOv5()
    result = yolov5_c.infer(iImage)
    result = result[0].view(-1).int()
    image_rec = cv2.rectangle(iImage, (result[0].item(), result[1].item()), (result[2].item(), result[3].item()),
                              (0, 0, 255), 1, 8)
    import matplotlib.pyplot as plt
    import matplotlib

    matplotlib.use('TkAgg')
    plt.imshow(image_rec)
    plt.show()
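The YOLOv5 class above is the piece an HTTP service would wrap to complete the API. A minimal sketch of such a wrapper, assuming Flask as the web framework and a multipart upload field named image (both are illustrative assumptions, not the course's actual server code):

# Minimal HTTP wrapper around YOLOv5.infer (sketch; Flask and the /detect route are assumptions).
import cv2
import numpy as np
from flask import Flask, request, jsonify

from detect import YOLOv5

app = Flask(__name__)
yolo = YOLOv5()  # load the model once at startup


@app.route('/detect', methods=['POST'])
def detect():
    im_bytes = np.frombuffer(request.files['image'].read(), np.uint8)
    img = cv2.imdecode(im_bytes, cv2.IMREAD_COLOR)
    xyxys, confs, cls_ids = yolo.infer(img)
    return jsonify({
        'boxes': xyxys.tolist(),
        'confs': confs.tolist(),
        'classes': cls_ids.tolist(),
    })


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)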
ayf_ocr/yolo_databases/api/export.py
Normal file
@ -0,0 +1,818 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
|
||||
|
||||
Format | `export.py --include` | Model
|
||||
--- | --- | ---
|
||||
PyTorch | - | yolov5s.pt
|
||||
TorchScript | `torchscript` | yolov5s.torchscript
|
||||
ONNX | `onnx` | yolov5s.onnx
|
||||
OpenVINO | `openvino` | yolov5s_openvino_model/
|
||||
TensorRT | `engine` | yolov5s.engine
|
||||
CoreML | `coreml` | yolov5s.mlmodel
|
||||
TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
|
||||
TensorFlow GraphDef | `pb` | yolov5s.pb
|
||||
TensorFlow Lite | `tflite` | yolov5s.tflite
|
||||
TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
|
||||
TensorFlow.js | `tfjs` | yolov5s_web_model/
|
||||
PaddlePaddle | `paddle` | yolov5s_paddle_model/
|
||||
|
||||
Requirements:
|
||||
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
|
||||
$ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
|
||||
|
||||
Usage:
|
||||
$ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
|
||||
|
||||
Inference:
|
||||
$ python detect.py --weights yolov5s.pt # PyTorch
|
||||
yolov5s.torchscript # TorchScript
|
||||
yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
|
||||
yolov5s_openvino_model # OpenVINO
|
||||
yolov5s.engine # TensorRT
|
||||
yolov5s.mlmodel # CoreML (macOS-only)
|
||||
yolov5s_saved_model # TensorFlow SavedModel
|
||||
yolov5s.pb # TensorFlow GraphDef
|
||||
yolov5s.tflite # TensorFlow Lite
|
||||
yolov5s_edgetpu.tflite # TensorFlow Edge TPU
|
||||
yolov5s_paddle_model # PaddlePaddle
|
||||
|
||||
TensorFlow.js:
|
||||
$ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
|
||||
$ npm install
|
||||
$ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
|
||||
$ npm start
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import contextlib
|
||||
import json
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
import torch
|
||||
from torch.utils.mobile_optimizer import optimize_for_mobile
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[0] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
if platform.system() != 'Windows':
|
||||
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
||||
|
||||
from models.experimental import attempt_load
|
||||
from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
|
||||
from utils.dataloaders import LoadImages
|
||||
from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
|
||||
check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save)
|
||||
from utils.torch_utils import select_device, smart_inference_mode
|
||||
|
||||
MACOS = platform.system() == 'Darwin' # macOS environment
|
||||
|
||||
|
||||
class iOSModel(torch.nn.Module):
|
||||
|
||||
def __init__(self, model, im):
|
||||
super().__init__()
|
||||
b, c, h, w = im.shape # batch, channel, height, width
|
||||
self.model = model
|
||||
self.nc = model.nc # number of classes
|
||||
if w == h:
|
||||
self.normalize = 1. / w
|
||||
else:
|
||||
self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]) # broadcast (slower, smaller)
|
||||
# np = model(im)[0].shape[1] # number of points
|
||||
# self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger)
|
||||
|
||||
def forward(self, x):
|
||||
xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
|
||||
return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4)
|
||||
|
||||
|
||||
def export_formats():
|
||||
# YOLOv5 export formats
|
||||
x = [
|
||||
['PyTorch', '-', '.pt', True, True],
|
||||
['TorchScript', 'torchscript', '.torchscript', True, True],
|
||||
['ONNX', 'onnx', '.onnx', True, True],
|
||||
['OpenVINO', 'openvino', '_openvino_model', True, False],
|
||||
['TensorRT', 'engine', '.engine', False, True],
|
||||
['CoreML', 'coreml', '.mlmodel', True, False],
|
||||
['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
|
||||
['TensorFlow GraphDef', 'pb', '.pb', True, True],
|
||||
['TensorFlow Lite', 'tflite', '.tflite', True, False],
|
||||
['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
|
||||
['TensorFlow.js', 'tfjs', '_web_model', False, False],
|
||||
['PaddlePaddle', 'paddle', '_paddle_model', True, True],]
|
||||
return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
|
||||
|
||||
|
||||
def try_export(inner_func):
|
||||
# YOLOv5 export decorator, i.e. @try_export
|
||||
inner_args = get_default_args(inner_func)
|
||||
|
||||
def outer_func(*args, **kwargs):
|
||||
prefix = inner_args['prefix']
|
||||
try:
|
||||
with Profile() as dt:
|
||||
f, model = inner_func(*args, **kwargs)
|
||||
LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
|
||||
return f, model
|
||||
except Exception as e:
|
||||
LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
|
||||
return None, None
|
||||
|
||||
return outer_func
|
||||
|
||||
|
||||
@try_export
|
||||
def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
|
||||
# YOLOv5 TorchScript model export
|
||||
LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
|
||||
f = file.with_suffix('.torchscript')
|
||||
|
||||
ts = torch.jit.trace(model, im, strict=False)
|
||||
d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names}
|
||||
extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap()
|
||||
if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
|
||||
optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
|
||||
else:
|
||||
ts.save(str(f), _extra_files=extra_files)
|
||||
return f, None
|
||||
|
||||
|
||||
@try_export
|
||||
def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
|
||||
# YOLOv5 ONNX export
|
||||
check_requirements('onnx>=1.12.0')
|
||||
import onnx
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
|
||||
f = file.with_suffix('.onnx')
|
||||
|
||||
output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
|
||||
if dynamic:
|
||||
dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640)
|
||||
if isinstance(model, SegmentationModel):
|
||||
dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85)
|
||||
dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160)
|
||||
elif isinstance(model, DetectionModel):
|
||||
dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85)
|
||||
|
||||
torch.onnx.export(
|
||||
model.cpu() if dynamic else model, # --dynamic only compatible with cpu
|
||||
im.cpu() if dynamic else im,
|
||||
f,
|
||||
verbose=False,
|
||||
opset_version=opset,
|
||||
do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
|
||||
input_names=['images'],
|
||||
output_names=output_names,
|
||||
dynamic_axes=dynamic or None)
|
||||
|
||||
# Checks
|
||||
model_onnx = onnx.load(f) # load onnx model
|
||||
onnx.checker.check_model(model_onnx) # check onnx model
|
||||
|
||||
# Metadata
|
||||
d = {'stride': int(max(model.stride)), 'names': model.names}
|
||||
for k, v in d.items():
|
||||
meta = model_onnx.metadata_props.add()
|
||||
meta.key, meta.value = k, str(v)
|
||||
onnx.save(model_onnx, f)
|
||||
|
||||
# Simplify
|
||||
if simplify:
|
||||
try:
|
||||
cuda = torch.cuda.is_available()
|
||||
check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
|
||||
import onnxsim
|
||||
|
||||
LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
|
||||
model_onnx, check = onnxsim.simplify(model_onnx)
|
||||
assert check, 'assert check failed'
|
||||
onnx.save(model_onnx, f)
|
||||
except Exception as e:
|
||||
LOGGER.info(f'{prefix} simplifier failure: {e}')
|
||||
return f, model_onnx
|
||||
|
||||
|
||||
@try_export
|
||||
def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
|
||||
# YOLOv5 OpenVINO export
|
||||
check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/
|
||||
import openvino.inference_engine as ie
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
|
||||
f = str(file).replace('.pt', f'_openvino_model{os.sep}')
|
||||
|
||||
args = [
|
||||
'mo',
|
||||
'--input_model',
|
||||
str(file.with_suffix('.onnx')),
|
||||
'--output_dir',
|
||||
f,
|
||||
'--data_type',
|
||||
('FP16' if half else 'FP32'),]
|
||||
subprocess.run(args, check=True, env=os.environ) # export
|
||||
yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
|
||||
return f, None
|
||||
|
||||
|
||||
@try_export
|
||||
def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):
|
||||
# YOLOv5 Paddle export
|
||||
check_requirements(('paddlepaddle', 'x2paddle'))
|
||||
import x2paddle
|
||||
from x2paddle.convert import pytorch2paddle
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...')
|
||||
f = str(file).replace('.pt', f'_paddle_model{os.sep}')
|
||||
|
||||
pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export
|
||||
yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
|
||||
return f, None
|
||||
|
||||
|
||||
@try_export
|
||||
def export_coreml(model, im, file, int8, half, nms, prefix=colorstr('CoreML:')):
|
||||
# YOLOv5 CoreML export
|
||||
check_requirements('coremltools')
|
||||
import coremltools as ct
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
|
||||
f = file.with_suffix('.mlmodel')
|
||||
|
||||
if nms:
|
||||
model = iOSModel(model, im)
|
||||
ts = torch.jit.trace(model, im, strict=False) # TorchScript model
|
||||
ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
|
||||
bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
|
||||
if bits < 32:
|
||||
if MACOS: # quantization only supported on macOS
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning
|
||||
ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
|
||||
else:
|
||||
print(f'{prefix} quantization only supported on macOS, skipping...')
|
||||
ct_model.save(f)
|
||||
return f, ct_model
|
||||
|
||||
|
||||
@try_export
|
||||
def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
|
||||
# YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
|
||||
assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
|
||||
try:
|
||||
import tensorrt as trt
|
||||
except Exception:
|
||||
if platform.system() == 'Linux':
|
||||
check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
|
||||
import tensorrt as trt
|
||||
|
||||
if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
|
||||
grid = model.model[-1].anchor_grid
|
||||
model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
|
||||
export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
|
||||
model.model[-1].anchor_grid = grid
|
||||
else: # TensorRT >= 8
|
||||
check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0
|
||||
export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
|
||||
onnx = file.with_suffix('.onnx')
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
|
||||
assert onnx.exists(), f'failed to export ONNX file: {onnx}'
|
||||
f = file.with_suffix('.engine') # TensorRT engine file
|
||||
logger = trt.Logger(trt.Logger.INFO)
|
||||
if verbose:
|
||||
logger.min_severity = trt.Logger.Severity.VERBOSE
|
||||
|
||||
builder = trt.Builder(logger)
|
||||
config = builder.create_builder_config()
|
||||
config.max_workspace_size = workspace * 1 << 30
|
||||
# config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice
|
||||
|
||||
flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
|
||||
network = builder.create_network(flag)
|
||||
parser = trt.OnnxParser(network, logger)
|
||||
if not parser.parse_from_file(str(onnx)):
|
||||
raise RuntimeError(f'failed to load ONNX file: {onnx}')
|
||||
|
||||
inputs = [network.get_input(i) for i in range(network.num_inputs)]
|
||||
outputs = [network.get_output(i) for i in range(network.num_outputs)]
|
||||
for inp in inputs:
|
||||
LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
|
||||
for out in outputs:
|
||||
LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
|
||||
|
||||
if dynamic:
|
||||
if im.shape[0] <= 1:
|
||||
LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument')
|
||||
profile = builder.create_optimization_profile()
|
||||
for inp in inputs:
|
||||
profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
|
||||
config.add_optimization_profile(profile)
|
||||
|
||||
LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}')
|
||||
if builder.platform_has_fast_fp16 and half:
|
||||
config.set_flag(trt.BuilderFlag.FP16)
|
||||
with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
|
||||
t.write(engine.serialize())
|
||||
return f, None
|
||||
|
||||
|
||||
@try_export
|
||||
def export_saved_model(model,
|
||||
im,
|
||||
file,
|
||||
dynamic,
|
||||
tf_nms=False,
|
||||
agnostic_nms=False,
|
||||
topk_per_class=100,
|
||||
topk_all=100,
|
||||
iou_thres=0.45,
|
||||
conf_thres=0.25,
|
||||
keras=False,
|
||||
prefix=colorstr('TensorFlow SavedModel:')):
|
||||
# YOLOv5 TensorFlow SavedModel export
|
||||
try:
|
||||
import tensorflow as tf
|
||||
except Exception:
|
||||
check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
|
||||
import tensorflow as tf
|
||||
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
|
||||
|
||||
from models.tf import TFModel
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
|
||||
f = str(file).replace('.pt', '_saved_model')
|
||||
batch_size, ch, *imgsz = list(im.shape) # BCHW
|
||||
|
||||
tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
|
||||
im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow
|
||||
_ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
|
||||
inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
|
||||
outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
|
||||
keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
|
||||
keras_model.trainable = False
|
||||
keras_model.summary()
|
||||
if keras:
|
||||
keras_model.save(f, save_format='tf')
|
||||
else:
|
||||
spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
|
||||
m = tf.function(lambda x: keras_model(x)) # full model
|
||||
m = m.get_concrete_function(spec)
|
||||
frozen_func = convert_variables_to_constants_v2(m)
|
||||
tfm = tf.Module()
|
||||
tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
|
||||
tfm.__call__(im)
|
||||
tf.saved_model.save(tfm,
|
||||
f,
|
||||
options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
|
||||
tf.__version__, '2.6') else tf.saved_model.SaveOptions())
|
||||
return f, keras_model
|
||||
|
||||
|
||||
@try_export
|
||||
def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
|
||||
# YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
|
||||
import tensorflow as tf
|
||||
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
|
||||
f = file.with_suffix('.pb')
|
||||
|
||||
m = tf.function(lambda x: keras_model(x)) # full model
|
||||
m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
|
||||
frozen_func = convert_variables_to_constants_v2(m)
|
||||
frozen_func.graph.as_graph_def()
|
||||
tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
|
||||
return f, None
|
||||
|
||||
|
||||
@try_export
|
||||
def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
|
||||
# YOLOv5 TensorFlow Lite export
|
||||
import tensorflow as tf
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
|
||||
batch_size, ch, *imgsz = list(im.shape) # BCHW
|
||||
f = str(file).replace('.pt', '-fp16.tflite')
|
||||
|
||||
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
|
||||
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
|
||||
converter.target_spec.supported_types = [tf.float16]
|
||||
converter.optimizations = [tf.lite.Optimize.DEFAULT]
|
||||
if int8:
|
||||
from models.tf import representative_dataset_gen
|
||||
dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
|
||||
converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
|
||||
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
|
||||
converter.target_spec.supported_types = []
|
||||
converter.inference_input_type = tf.uint8 # or tf.int8
|
||||
converter.inference_output_type = tf.uint8 # or tf.int8
|
||||
converter.experimental_new_quantizer = True
|
||||
f = str(file).replace('.pt', '-int8.tflite')
|
||||
if nms or agnostic_nms:
|
||||
converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
|
||||
|
||||
tflite_model = converter.convert()
|
||||
open(f, 'wb').write(tflite_model)
|
||||
return f, None
|
||||
|
||||
|
||||
@try_export
|
||||
def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
|
||||
# YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
|
||||
cmd = 'edgetpu_compiler --version'
|
||||
help_url = 'https://coral.ai/docs/edgetpu/compiler/'
|
||||
assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
|
||||
if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0:
|
||||
LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
|
||||
sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
|
||||
for c in (
|
||||
'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
|
||||
'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
|
||||
'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
|
||||
subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
|
||||
ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
|
||||
f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model
|
||||
f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model
|
||||
|
||||
subprocess.run([
|
||||
'edgetpu_compiler',
|
||||
'-s',
|
||||
'-d',
|
||||
'-k',
|
||||
'10',
|
||||
'--out_dir',
|
||||
str(file.parent),
|
||||
f_tfl,], check=True)
|
||||
return f, None
|
||||
|
||||
|
||||
@try_export
|
||||
def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')):
|
||||
# YOLOv5 TensorFlow.js export
|
||||
check_requirements('tensorflowjs')
|
||||
import tensorflowjs as tfjs
|
||||
|
||||
LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
|
||||
f = str(file).replace('.pt', '_web_model') # js dir
|
||||
f_pb = file.with_suffix('.pb') # *.pb path
|
||||
f_json = f'{f}/model.json' # *.json path
|
||||
|
||||
args = [
|
||||
'tensorflowjs_converter',
|
||||
'--input_format=tf_frozen_model',
|
||||
'--quantize_uint8' if int8 else '',
|
||||
'--output_node_names=Identity,Identity_1,Identity_2,Identity_3',
|
||||
str(f_pb),
|
||||
str(f),]
|
||||
subprocess.run([arg for arg in args if arg], check=True)
|
||||
|
||||
json = Path(f_json).read_text()
|
||||
with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
|
||||
subst = re.sub(
|
||||
r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
|
||||
r'"Identity.?.?": {"name": "Identity.?.?"}, '
|
||||
r'"Identity.?.?": {"name": "Identity.?.?"}, '
|
||||
r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
|
||||
r'"Identity_1": {"name": "Identity_1"}, '
|
||||
r'"Identity_2": {"name": "Identity_2"}, '
|
||||
r'"Identity_3": {"name": "Identity_3"}}}', json)
|
||||
j.write(subst)
|
||||
return f, None
|
||||
|
||||
|
||||
def add_tflite_metadata(file, metadata, num_outputs):
|
||||
# Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata
|
||||
with contextlib.suppress(ImportError):
|
||||
# check_requirements('tflite_support')
|
||||
from tflite_support import flatbuffers
|
||||
from tflite_support import metadata as _metadata
|
||||
from tflite_support import metadata_schema_py_generated as _metadata_fb
|
||||
|
||||
tmp_file = Path('/tmp/meta.txt')
|
||||
with open(tmp_file, 'w') as meta_f:
|
||||
meta_f.write(str(metadata))
|
||||
|
||||
model_meta = _metadata_fb.ModelMetadataT()
|
||||
label_file = _metadata_fb.AssociatedFileT()
|
||||
label_file.name = tmp_file.name
|
||||
model_meta.associatedFiles = [label_file]
|
||||
|
||||
subgraph = _metadata_fb.SubGraphMetadataT()
|
||||
subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
|
||||
subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs
|
||||
model_meta.subgraphMetadata = [subgraph]
|
||||
|
||||
b = flatbuffers.Builder(0)
|
||||
b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
|
||||
metadata_buf = b.Output()
|
||||
|
||||
populator = _metadata.MetadataPopulator.with_model_file(file)
|
||||
populator.load_metadata_buffer(metadata_buf)
|
||||
populator.load_associated_files([str(tmp_file)])
|
||||
populator.populate()
|
||||
tmp_file.unlink()
|
||||
|
||||
|
||||
def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:')):
|
||||
# YOLOv5 CoreML pipeline
|
||||
import coremltools as ct
|
||||
from PIL import Image
|
||||
|
||||
print(f'{prefix} starting pipeline with coremltools {ct.__version__}...')
|
||||
batch_size, ch, h, w = list(im.shape) # BCHW
|
||||
t = time.time()
|
||||
|
||||
# Output shapes
|
||||
spec = model.get_spec()
|
||||
out0, out1 = iter(spec.description.output)
|
||||
if platform.system() == 'Darwin':
|
||||
img = Image.new('RGB', (w, h)) # img(192 width, 320 height)
|
||||
# img = torch.zeros((*opt.img_size, 3)).numpy() # img size(320,192,3) iDetection
|
||||
out = model.predict({'image': img})
|
||||
out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape
|
||||
else: # linux and windows can not run model.predict(), get sizes from pytorch output y
|
||||
s = tuple(y[0].shape)
|
||||
out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4) # (3780, 80), (3780, 4)
|
||||
|
||||
# Checks
|
||||
nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
|
||||
na, nc = out0_shape
|
||||
# na, nc = out0.type.multiArrayType.shape # number anchors, classes
|
||||
assert len(names) == nc, f'{len(names)} names found for nc={nc}' # check
|
||||
|
||||
# Define output shapes (missing)
|
||||
out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80)
|
||||
out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4)
|
||||
# spec.neuralNetwork.preprocessing[0].featureName = '0'
|
||||
|
||||
# Flexible input shapes
|
||||
# from coremltools.models.neural_network import flexible_shape_utils
|
||||
# s = [] # shapes
|
||||
# s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))
|
||||
# s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384)) # (height, width)
|
||||
# flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)
|
||||
# r = flexible_shape_utils.NeuralNetworkImageSizeRange() # shape ranges
|
||||
# r.add_height_range((192, 640))
|
||||
# r.add_width_range((192, 640))
|
||||
# flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)
|
||||
|
||||
# Print
|
||||
print(spec.description)
|
||||
|
||||
# Model from spec
|
||||
model = ct.models.MLModel(spec)
|
||||
|
||||
# 3. Create NMS protobuf
|
||||
nms_spec = ct.proto.Model_pb2.Model()
|
||||
nms_spec.specificationVersion = 5
|
||||
for i in range(2):
|
||||
decoder_output = model._spec.description.output[i].SerializeToString()
|
||||
nms_spec.description.input.add()
|
||||
nms_spec.description.input[i].ParseFromString(decoder_output)
|
||||
nms_spec.description.output.add()
|
||||
nms_spec.description.output[i].ParseFromString(decoder_output)
|
||||
|
||||
nms_spec.description.output[0].name = 'confidence'
|
||||
nms_spec.description.output[1].name = 'coordinates'
|
||||
|
||||
output_sizes = [nc, 4]
|
||||
for i in range(2):
|
||||
ma_type = nms_spec.description.output[i].type.multiArrayType
|
||||
ma_type.shapeRange.sizeRanges.add()
|
||||
ma_type.shapeRange.sizeRanges[0].lowerBound = 0
|
||||
ma_type.shapeRange.sizeRanges[0].upperBound = -1
|
||||
ma_type.shapeRange.sizeRanges.add()
|
||||
ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
|
||||
ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
|
||||
del ma_type.shape[:]
|
||||
|
||||
nms = nms_spec.nonMaximumSuppression
|
||||
nms.confidenceInputFeatureName = out0.name # 1x507x80
|
||||
nms.coordinatesInputFeatureName = out1.name # 1x507x4
|
||||
nms.confidenceOutputFeatureName = 'confidence'
|
||||
nms.coordinatesOutputFeatureName = 'coordinates'
|
||||
nms.iouThresholdInputFeatureName = 'iouThreshold'
|
||||
nms.confidenceThresholdInputFeatureName = 'confidenceThreshold'
|
||||
nms.iouThreshold = 0.45
|
||||
nms.confidenceThreshold = 0.25
|
||||
nms.pickTop.perClass = True
|
||||
nms.stringClassLabels.vector.extend(names.values())
|
||||
nms_model = ct.models.MLModel(nms_spec)
|
||||
|
||||
# 4. Pipeline models together
|
||||
pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)),
|
||||
('iouThreshold', ct.models.datatypes.Double()),
|
||||
('confidenceThreshold', ct.models.datatypes.Double())],
|
||||
output_features=['confidence', 'coordinates'])
|
||||
pipeline.add_model(model)
|
||||
pipeline.add_model(nms_model)
|
||||
|
||||
# Correct datatypes
|
||||
pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
|
||||
pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
|
||||
pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
|
||||
|
||||
# Update metadata
|
||||
pipeline.spec.specificationVersion = 5
|
||||
pipeline.spec.description.metadata.versionString = 'https://github.com/ultralytics/yolov5'
|
||||
pipeline.spec.description.metadata.shortDescription = 'https://github.com/ultralytics/yolov5'
|
||||
pipeline.spec.description.metadata.author = 'glenn.jocher@ultralytics.com'
|
||||
pipeline.spec.description.metadata.license = 'https://github.com/ultralytics/yolov5/blob/master/LICENSE'
|
||||
pipeline.spec.description.metadata.userDefined.update({
|
||||
'classes': ','.join(names.values()),
|
||||
'iou_threshold': str(nms.iouThreshold),
|
||||
'confidence_threshold': str(nms.confidenceThreshold)})
|
||||
|
||||
# Save the model
|
||||
f = file.with_suffix('.mlmodel') # filename
|
||||
model = ct.models.MLModel(pipeline.spec)
|
||||
model.input_description['image'] = 'Input image'
|
||||
model.input_description['iouThreshold'] = f'(optional) IOU Threshold override (default: {nms.iouThreshold})'
|
||||
model.input_description['confidenceThreshold'] = \
|
||||
f'(optional) Confidence Threshold override (default: {nms.confidenceThreshold})'
|
||||
model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")'
|
||||
model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)'
|
||||
model.save(f) # pipelined
|
||||
print(f'{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)')
|
||||
|
||||
|
||||
@smart_inference_mode()
|
||||
def run(
|
||||
data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
|
||||
weights=ROOT / 'yolov5s.pt', # weights path
|
||||
imgsz=(640, 640), # image (height, width)
|
||||
batch_size=1, # batch size
|
||||
device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
||||
include=('torchscript', 'onnx'), # include formats
|
||||
half=False, # FP16 half-precision export
|
||||
inplace=False, # set YOLOv5 Detect() inplace=True
|
||||
keras=False, # use Keras
|
||||
optimize=False, # TorchScript: optimize for mobile
|
||||
int8=False, # CoreML/TF INT8 quantization
|
||||
dynamic=False, # ONNX/TF/TensorRT: dynamic axes
|
||||
simplify=False, # ONNX: simplify model
|
||||
opset=12, # ONNX: opset version
|
||||
verbose=False, # TensorRT: verbose log
|
||||
workspace=4, # TensorRT: workspace size (GB)
|
||||
nms=False, # TF: add NMS to model
|
||||
agnostic_nms=False, # TF: add agnostic NMS to model
|
||||
topk_per_class=100, # TF.js NMS: topk per class to keep
|
||||
topk_all=100, # TF.js NMS: topk for all classes to keep
|
||||
iou_thres=0.45, # TF.js NMS: IoU threshold
|
||||
conf_thres=0.25, # TF.js NMS: confidence threshold
|
||||
):
|
||||
t = time.time()
|
||||
include = [x.lower() for x in include] # to lowercase
|
||||
fmts = tuple(export_formats()['Argument'][1:]) # --include arguments
|
||||
flags = [x in include for x in fmts]
|
||||
assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}'
|
||||
jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans
|
||||
file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights
|
||||
|
||||
# Load PyTorch model
|
||||
device = select_device(device)
|
||||
if half:
|
||||
assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
|
||||
assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'
|
||||
model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model
|
||||
|
||||
# Checks
|
||||
imgsz *= 2 if len(imgsz) == 1 else 1 # expand
|
||||
if optimize:
|
||||
assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'
|
||||
|
||||
# Input
|
||||
gs = int(max(model.stride)) # grid size (max stride)
|
||||
imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples
|
||||
im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection
|
||||
|
||||
# Update model
|
||||
model.eval()
|
||||
for k, m in model.named_modules():
|
||||
if isinstance(m, Detect):
|
||||
m.inplace = inplace
|
||||
m.dynamic = dynamic
|
||||
m.export = True
|
||||
|
||||
for _ in range(2):
|
||||
y = model(im) # dry runs
|
||||
if half and not coreml:
|
||||
im, model = im.half(), model.half() # to FP16
|
||||
shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape
|
||||
metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata
|
||||
LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
|
||||
|
||||
# Exports
|
||||
f = [''] * len(fmts) # exported filenames
|
||||
warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning
|
||||
if jit: # TorchScript
|
||||
f[0], _ = export_torchscript(model, im, file, optimize)
|
||||
if engine: # TensorRT required before ONNX
|
||||
f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
|
||||
if onnx or xml: # OpenVINO requires ONNX
|
||||
f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
|
||||
if xml: # OpenVINO
|
||||
f[3], _ = export_openvino(file, metadata, half)
|
||||
if coreml: # CoreML
|
||||
f[4], ct_model = export_coreml(model, im, file, int8, half, nms)
|
||||
if nms:
|
||||
pipeline_coreml(ct_model, im, file, model.names, y)
|
||||
if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats
|
||||
assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
|
||||
assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.'
|
||||
f[5], s_model = export_saved_model(model.cpu(),
|
||||
im,
|
||||
file,
|
||||
dynamic,
|
||||
tf_nms=nms or agnostic_nms or tfjs,
|
||||
agnostic_nms=agnostic_nms or tfjs,
|
||||
topk_per_class=topk_per_class,
|
||||
topk_all=topk_all,
|
||||
iou_thres=iou_thres,
|
||||
conf_thres=conf_thres,
|
||||
keras=keras)
|
||||
if pb or tfjs: # pb prerequisite to tfjs
|
||||
f[6], _ = export_pb(s_model, file)
|
||||
if tflite or edgetpu:
|
||||
f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
|
||||
if edgetpu:
|
||||
f[8], _ = export_edgetpu(file)
|
||||
add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))
|
||||
if tfjs:
|
||||
f[9], _ = export_tfjs(file, int8)
|
||||
if paddle: # PaddlePaddle
|
||||
f[10], _ = export_paddle(model, im, file, metadata)
|
||||
|
||||
# Finish
|
||||
f = [str(x) for x in f if x] # filter out '' and None
|
||||
if any(f):
|
||||
cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type
|
||||
det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel)
|
||||
dir = Path('segment' if seg else 'classify' if cls else '')
|
||||
h = '--half' if half else '' # --half FP16 inference arg
|
||||
s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \
|
||||
'# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else ''
|
||||
LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
|
||||
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
|
||||
f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
|
||||
f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
|
||||
f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}"
|
||||
f'\nVisualize: https://netron.app')
|
||||
return f # return list of exported files/dirs
|
||||
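# Illustrative programmatic use of run() (a sketch; arguments mirror the signature above):
#   files = run(weights='yolov5s.pt', include=('onnx',), imgsz=(640, 640), opset=12)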
|
||||
|
||||
def parse_opt(known=False):
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
|
||||
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
|
||||
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)')
|
||||
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
|
||||
parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
||||
parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
|
||||
parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
|
||||
parser.add_argument('--keras', action='store_true', help='TF: use Keras')
|
||||
parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
|
||||
parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
|
||||
parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
|
||||
parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
|
||||
parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')
|
||||
parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
|
||||
parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
|
||||
parser.add_argument('--nms', action='store_true', help='TF: add NMS to model')
|
||||
parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model')
|
||||
parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
|
||||
parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
|
||||
parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold')
|
||||
parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
|
||||
parser.add_argument(
|
||||
'--include',
|
||||
nargs='+',
|
||||
default=['torchscript'],
|
||||
help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle')
|
||||
opt = parser.parse_known_args()[0] if known else parser.parse_args()
|
||||
print_args(vars(opt))
|
||||
return opt
|
||||
|
||||
|
||||
def main(opt):
|
||||
for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]):
|
||||
run(**vars(opt))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
opt = parse_opt()
|
||||
main(opt)
|
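# Example command lines (illustrative; all flags are defined in parse_opt above):
#   python export.py --weights yolov5s.pt --include onnx --opset 12
#   python export.py --weights yolov5s.pt --include engine --device 0 --half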
ayf_ocr/yolo_databases/api/main.py (new file)
@ -0,0 +1,29 @@
from flask import Flask, request, jsonify
from detect import YOLOv5
from utils.general import cv2
import numpy as np
import base64

app = Flask(__name__)
yolov_5 = YOLOv5()  # load the detector once at startup so every request reuses the same model


@app.route("/", methods=["POST"])
def index():
    # The client sends the image as a base64-encoded string in the 'images' form field
    images = request.values.get("images")
    input_image = base64.b64decode(images)
    # Decode the raw bytes into a BGR image for inference
    im_bytes = np.frombuffer(input_image, np.uint8)
    image = cv2.imdecode(im_bytes, cv2.IMREAD_COLOR)
    result = yolov_5.infer(image)
    # Take the first image's detections and flatten them into a 1-D integer tensor
    result = result[0].view(-1).int()
    res_dict = {'coordinate': result.tolist()}
    print(res_dict)
    return jsonify(res_dict)


if __name__ == '__main__':
    app.run(
        host='127.0.0.1',
        port=8888,
        debug=True
    )
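# Illustrative client sketch (not part of the service): base64-encode an image and POST it
# as the 'images' form field; the endpoint, port and 'coordinate' key follow the code above,
# and 'test.jpg' is a placeholder filename.
#   import base64, requests
#   with open('test.jpg', 'rb') as fh:
#       b64 = base64.b64encode(fh.read()).decode()
#   resp = requests.post('http://127.0.0.1:8888/', data={'images': b64})
#   print(resp.json()['coordinate'])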

ayf_ocr/yolo_databases/api/models/__init__.py (new, empty file)

ayf_ocr/yolo_databases/api/models/common.py (new file)
@ -0,0 +1,871 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
Common modules
|
||||
"""
|
||||
|
||||
import ast
|
||||
import contextlib
|
||||
import json
|
||||
import math
|
||||
import platform
|
||||
import warnings
|
||||
import zipfile
|
||||
from collections import OrderedDict, namedtuple
|
||||
from copy import copy
|
||||
from pathlib import Path
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import requests
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from PIL import Image
|
||||
from torch.cuda import amp
|
||||
|
||||
from utils import TryExcept
|
||||
from utils.dataloaders import exif_transpose, letterbox
|
||||
from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
|
||||
increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
|
||||
xyxy2xywh, yaml_load)
|
||||
from utils.plots import Annotator, colors, save_one_box
|
||||
from utils.torch_utils import copy_attr, smart_inference_mode
|
||||
|
||||
|
||||
def autopad(k, p=None, d=1): # kernel, padding, dilation
|
||||
# Pad to 'same' shape outputs
|
||||
if d > 1:
|
||||
k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
|
||||
if p is None:
|
||||
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
|
||||
return p
|
||||
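# e.g. autopad(3) -> 1, autopad(5) -> 2, autopad(3, d=2) -> 2, giving 'same'-shape outputs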
|
||||
|
||||
class Conv(nn.Module):
|
||||
# Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
|
||||
default_act = nn.SiLU() # default activation
|
||||
|
||||
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
|
||||
super().__init__()
|
||||
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
|
||||
self.bn = nn.BatchNorm2d(c2)
|
||||
self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
|
||||
|
||||
def forward(self, x):
|
||||
return self.act(self.bn(self.conv(x)))
|
||||
|
||||
def forward_fuse(self, x):
|
||||
return self.act(self.conv(x))
|
||||
|
||||
|
||||
class DWConv(Conv):
|
||||
# Depth-wise convolution
|
||||
def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation
|
||||
super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
|
||||
|
||||
|
||||
class DWConvTranspose2d(nn.ConvTranspose2d):
|
||||
# Depth-wise transpose convolution
|
||||
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out
|
||||
super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
|
||||
|
||||
|
||||
class TransformerLayer(nn.Module):
|
||||
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
|
||||
def __init__(self, c, num_heads):
|
||||
super().__init__()
|
||||
self.q = nn.Linear(c, c, bias=False)
|
||||
self.k = nn.Linear(c, c, bias=False)
|
||||
self.v = nn.Linear(c, c, bias=False)
|
||||
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
|
||||
self.fc1 = nn.Linear(c, c, bias=False)
|
||||
self.fc2 = nn.Linear(c, c, bias=False)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
|
||||
x = self.fc2(self.fc1(x)) + x
|
||||
return x
|
||||
|
||||
|
||||
class TransformerBlock(nn.Module):
|
||||
# Vision Transformer https://arxiv.org/abs/2010.11929
|
||||
def __init__(self, c1, c2, num_heads, num_layers):
|
||||
super().__init__()
|
||||
self.conv = None
|
||||
if c1 != c2:
|
||||
self.conv = Conv(c1, c2)
|
||||
self.linear = nn.Linear(c2, c2) # learnable position embedding
|
||||
self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
|
||||
self.c2 = c2
|
||||
|
||||
def forward(self, x):
|
||||
if self.conv is not None:
|
||||
x = self.conv(x)
|
||||
b, _, w, h = x.shape
|
||||
p = x.flatten(2).permute(2, 0, 1)
|
||||
return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
|
||||
|
||||
|
||||
class Bottleneck(nn.Module):
|
||||
# Standard bottleneck
|
||||
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
|
||||
super().__init__()
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.cv1 = Conv(c1, c_, 1, 1)
|
||||
self.cv2 = Conv(c_, c2, 3, 1, g=g)
|
||||
self.add = shortcut and c1 == c2
|
||||
|
||||
def forward(self, x):
|
||||
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
|
||||
|
||||
|
||||
class BottleneckCSP(nn.Module):
|
||||
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
|
||||
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
|
||||
super().__init__()
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.cv1 = Conv(c1, c_, 1, 1)
|
||||
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
|
||||
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
|
||||
self.cv4 = Conv(2 * c_, c2, 1, 1)
|
||||
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
|
||||
self.act = nn.SiLU()
|
||||
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
|
||||
|
||||
def forward(self, x):
|
||||
y1 = self.cv3(self.m(self.cv1(x)))
|
||||
y2 = self.cv2(x)
|
||||
return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
|
||||
|
||||
|
||||
class CrossConv(nn.Module):
|
||||
# Cross Convolution Downsample
|
||||
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
|
||||
# ch_in, ch_out, kernel, stride, groups, expansion, shortcut
|
||||
super().__init__()
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.cv1 = Conv(c1, c_, (1, k), (1, s))
|
||||
self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
|
||||
self.add = shortcut and c1 == c2
|
||||
|
||||
def forward(self, x):
|
||||
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
|
||||
|
||||
|
||||
class C3(nn.Module):
|
||||
# CSP Bottleneck with 3 convolutions
|
||||
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
|
||||
super().__init__()
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.cv1 = Conv(c1, c_, 1, 1)
|
||||
self.cv2 = Conv(c1, c_, 1, 1)
|
||||
self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2)
|
||||
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
|
||||
|
||||
def forward(self, x):
|
||||
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
|
||||
|
||||
|
||||
class C3x(C3):
|
||||
# C3 module with cross-convolutions
|
||||
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
|
||||
super().__init__(c1, c2, n, shortcut, g, e)
|
||||
c_ = int(c2 * e)
|
||||
self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
|
||||
|
||||
|
||||
class C3TR(C3):
|
||||
# C3 module with TransformerBlock()
|
||||
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
|
||||
super().__init__(c1, c2, n, shortcut, g, e)
|
||||
c_ = int(c2 * e)
|
||||
self.m = TransformerBlock(c_, c_, 4, n)
|
||||
|
||||
|
||||
class C3SPP(C3):
|
||||
# C3 module with SPP()
|
||||
def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
|
||||
super().__init__(c1, c2, n, shortcut, g, e)
|
||||
c_ = int(c2 * e)
|
||||
self.m = SPP(c_, c_, k)
|
||||
|
||||
|
||||
class C3Ghost(C3):
|
||||
# C3 module with GhostBottleneck()
|
||||
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
|
||||
super().__init__(c1, c2, n, shortcut, g, e)
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
|
||||
|
||||
|
||||
class SPP(nn.Module):
|
||||
# Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
|
||||
def __init__(self, c1, c2, k=(5, 9, 13)):
|
||||
super().__init__()
|
||||
c_ = c1 // 2 # hidden channels
|
||||
self.cv1 = Conv(c1, c_, 1, 1)
|
||||
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
|
||||
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
|
||||
|
||||
def forward(self, x):
|
||||
x = self.cv1(x)
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
|
||||
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
|
||||
|
||||
|
||||
class SPPF(nn.Module):
|
||||
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
|
||||
def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
|
||||
super().__init__()
|
||||
c_ = c1 // 2 # hidden channels
|
||||
self.cv1 = Conv(c1, c_, 1, 1)
|
||||
self.cv2 = Conv(c_ * 4, c2, 1, 1)
|
||||
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.cv1(x)
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
|
||||
y1 = self.m(x)
|
||||
y2 = self.m(y1)
|
||||
return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
|
||||
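# Note: applying the same k=5 max-pool three times in sequence yields effective receptive
# fields of 5, 9 and 13, which is why SPPF(k=5) reproduces SPP(k=(5, 9, 13)) while
# re-using the intermediate pooling results.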
|
||||
|
||||
class Focus(nn.Module):
|
||||
# Focus wh information into c-space
|
||||
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
|
||||
super().__init__()
|
||||
self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
|
||||
# self.contract = Contract(gain=2)
|
||||
|
||||
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
|
||||
return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
|
||||
# return self.conv(self.contract(x))
|
||||
|
||||
|
||||
class GhostConv(nn.Module):
|
||||
# Ghost Convolution https://github.com/huawei-noah/ghostnet
|
||||
def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
|
||||
super().__init__()
|
||||
c_ = c2 // 2 # hidden channels
|
||||
self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
|
||||
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
|
||||
|
||||
def forward(self, x):
|
||||
y = self.cv1(x)
|
||||
return torch.cat((y, self.cv2(y)), 1)
|
||||
|
||||
|
||||
class GhostBottleneck(nn.Module):
|
||||
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
|
||||
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
|
||||
super().__init__()
|
||||
c_ = c2 // 2
|
||||
self.conv = nn.Sequential(
|
||||
GhostConv(c1, c_, 1, 1), # pw
|
||||
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
|
||||
GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
|
||||
self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1,
|
||||
act=False)) if s == 2 else nn.Identity()
|
||||
|
||||
def forward(self, x):
|
||||
return self.conv(x) + self.shortcut(x)
|
||||
|
||||
|
||||
class Contract(nn.Module):
|
||||
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
|
||||
def __init__(self, gain=2):
|
||||
super().__init__()
|
||||
self.gain = gain
|
||||
|
||||
def forward(self, x):
|
||||
b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'
|
||||
s = self.gain
|
||||
x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
|
||||
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
|
||||
return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
|
||||
|
||||
|
||||
class Expand(nn.Module):
|
||||
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
|
||||
def __init__(self, gain=2):
|
||||
super().__init__()
|
||||
self.gain = gain
|
||||
|
||||
def forward(self, x):
|
||||
b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
|
||||
s = self.gain
|
||||
x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)
|
||||
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
|
||||
return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)
|
||||
|
||||
|
||||
class Concat(nn.Module):
|
||||
# Concatenate a list of tensors along dimension
|
||||
def __init__(self, dimension=1):
|
||||
super().__init__()
|
||||
self.d = dimension
|
||||
|
||||
def forward(self, x):
|
||||
return torch.cat(x, self.d)
|
||||
|
||||
|
||||
class DetectMultiBackend(nn.Module):
|
||||
# YOLOv5 MultiBackend class for python inference on various backends
|
||||
def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):
|
||||
# Usage:
|
||||
# PyTorch: weights = *.pt
|
||||
# TorchScript: *.torchscript
|
||||
# ONNX Runtime: *.onnx
|
||||
# ONNX OpenCV DNN: *.onnx --dnn
|
||||
# OpenVINO: *_openvino_model
|
||||
# CoreML: *.mlmodel
|
||||
# TensorRT: *.engine
|
||||
# TensorFlow SavedModel: *_saved_model
|
||||
# TensorFlow GraphDef: *.pb
|
||||
# TensorFlow Lite: *.tflite
|
||||
# TensorFlow Edge TPU: *_edgetpu.tflite
|
||||
# PaddlePaddle: *_paddle_model
|
||||
from models.experimental import attempt_download, attempt_load # scoped to avoid circular import
|
||||
|
||||
super().__init__()
|
||||
w = str(weights[0] if isinstance(weights, list) else weights)
|
||||
pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
|
||||
fp16 &= pt or jit or onnx or engine or triton # FP16
|
||||
nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)
|
||||
stride = 32 # default stride
|
||||
cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA
|
||||
if not (pt or triton):
|
||||
w = attempt_download(w) # download if not local
|
||||
|
||||
if pt: # PyTorch
|
||||
model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
|
||||
stride = max(int(model.stride.max()), 32) # model stride
|
||||
names = model.module.names if hasattr(model, 'module') else model.names # get class names
|
||||
model.half() if fp16 else model.float()
|
||||
self.model = model # explicitly assign for to(), cpu(), cuda(), half()
|
||||
elif jit: # TorchScript
|
||||
LOGGER.info(f'Loading {w} for TorchScript inference...')
|
||||
extra_files = {'config.txt': ''} # model metadata
|
||||
model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
|
||||
model.half() if fp16 else model.float()
|
||||
if extra_files['config.txt']: # load metadata dict
|
||||
d = json.loads(extra_files['config.txt'],
|
||||
object_hook=lambda d: {
|
||||
int(k) if k.isdigit() else k: v
|
||||
for k, v in d.items()})
|
||||
stride, names = int(d['stride']), d['names']
|
||||
elif dnn: # ONNX OpenCV DNN
|
||||
LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
|
||||
check_requirements('opencv-python>=4.5.4')
|
||||
net = cv2.dnn.readNetFromONNX(w)
|
||||
elif onnx: # ONNX Runtime
|
||||
LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
|
||||
check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
|
||||
import onnxruntime
|
||||
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
|
||||
session = onnxruntime.InferenceSession(w, providers=providers)
|
||||
output_names = [x.name for x in session.get_outputs()]
|
||||
meta = session.get_modelmeta().custom_metadata_map # metadata
|
||||
if 'stride' in meta:
|
||||
stride, names = int(meta['stride']), eval(meta['names'])
|
||||
elif xml: # OpenVINO
|
||||
LOGGER.info(f'Loading {w} for OpenVINO inference...')
|
||||
check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/
|
||||
from openvino.runtime import Core, Layout, get_batch
|
||||
ie = Core()
|
||||
if not Path(w).is_file(): # if not *.xml
|
||||
w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir
|
||||
network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
|
||||
if network.get_parameters()[0].get_layout().empty:
|
||||
network.get_parameters()[0].set_layout(Layout('NCHW'))
|
||||
batch_dim = get_batch(network)
|
||||
if batch_dim.is_static:
|
||||
batch_size = batch_dim.get_length()
|
||||
executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2
|
||||
stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata
|
||||
elif engine: # TensorRT
|
||||
LOGGER.info(f'Loading {w} for TensorRT inference...')
|
||||
import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
|
||||
check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0
|
||||
if device.type == 'cpu':
|
||||
device = torch.device('cuda:0')
|
||||
Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
|
||||
logger = trt.Logger(trt.Logger.INFO)
|
||||
with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
|
||||
model = runtime.deserialize_cuda_engine(f.read())
|
||||
context = model.create_execution_context()
|
||||
bindings = OrderedDict()
|
||||
output_names = []
|
||||
fp16 = False # default updated below
|
||||
dynamic = False
|
||||
for i in range(model.num_bindings):
|
||||
name = model.get_binding_name(i)
|
||||
dtype = trt.nptype(model.get_binding_dtype(i))
|
||||
if model.binding_is_input(i):
|
||||
if -1 in tuple(model.get_binding_shape(i)): # dynamic
|
||||
dynamic = True
|
||||
context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
|
||||
if dtype == np.float16:
|
||||
fp16 = True
|
||||
else: # output
|
||||
output_names.append(name)
|
||||
shape = tuple(context.get_binding_shape(i))
|
||||
im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
|
||||
bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
|
||||
binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
|
||||
batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size
|
||||
elif coreml: # CoreML
|
||||
LOGGER.info(f'Loading {w} for CoreML inference...')
|
||||
import coremltools as ct
|
||||
model = ct.models.MLModel(w)
|
||||
elif saved_model: # TF SavedModel
|
||||
LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
|
||||
import tensorflow as tf
|
||||
keras = False # assume TF1 saved_model
|
||||
model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
|
||||
elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
|
||||
LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
|
||||
import tensorflow as tf
|
||||
|
||||
def wrap_frozen_graph(gd, inputs, outputs):
|
||||
x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped
|
||||
ge = x.graph.as_graph_element
|
||||
return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
|
||||
|
||||
def gd_outputs(gd):
|
||||
name_list, input_list = [], []
|
||||
for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef
|
||||
name_list.append(node.name)
|
||||
input_list.extend(node.input)
|
||||
return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))
|
||||
|
||||
gd = tf.Graph().as_graph_def() # TF GraphDef
|
||||
with open(w, 'rb') as f:
|
||||
gd.ParseFromString(f.read())
|
||||
frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))
|
||||
elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
|
||||
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
|
||||
from tflite_runtime.interpreter import Interpreter, load_delegate
|
||||
except ImportError:
|
||||
import tensorflow as tf
|
||||
Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,
|
||||
if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
|
||||
LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
|
||||
delegate = {
|
||||
'Linux': 'libedgetpu.so.1',
|
||||
'Darwin': 'libedgetpu.1.dylib',
|
||||
'Windows': 'edgetpu.dll'}[platform.system()]
|
||||
interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
|
||||
else: # TFLite
|
||||
LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
|
||||
interpreter = Interpreter(model_path=w) # load TFLite model
|
||||
interpreter.allocate_tensors() # allocate
|
||||
input_details = interpreter.get_input_details() # inputs
|
||||
output_details = interpreter.get_output_details() # outputs
|
||||
# load metadata
|
||||
with contextlib.suppress(zipfile.BadZipFile):
|
||||
with zipfile.ZipFile(w, 'r') as model:
|
||||
meta_file = model.namelist()[0]
|
||||
meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))
|
||||
stride, names = int(meta['stride']), meta['names']
|
||||
elif tfjs: # TF.js
|
||||
raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
|
||||
elif paddle: # PaddlePaddle
|
||||
LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
|
||||
check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
|
||||
import paddle.inference as pdi
|
||||
if not Path(w).is_file(): # if not *.pdmodel
|
||||
w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir
|
||||
weights = Path(w).with_suffix('.pdiparams')
|
||||
config = pdi.Config(str(w), str(weights))
|
||||
if cuda:
|
||||
config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
|
||||
predictor = pdi.create_predictor(config)
|
||||
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
|
||||
output_names = predictor.get_output_names()
|
||||
elif triton: # NVIDIA Triton Inference Server
|
||||
LOGGER.info(f'Using {w} as Triton Inference Server...')
|
||||
check_requirements('tritonclient[all]')
|
||||
from utils.triton import TritonRemoteModel
|
||||
model = TritonRemoteModel(url=w)
|
||||
nhwc = model.runtime.startswith('tensorflow')
|
||||
else:
|
||||
raise NotImplementedError(f'ERROR: {w} is not a supported format')
|
||||
|
||||
# class names
|
||||
if 'names' not in locals():
|
||||
names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
|
||||
if names[0] == 'n01440764' and len(names) == 1000: # ImageNet
|
||||
names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names
|
||||
|
||||
self.__dict__.update(locals()) # assign all variables to self
|
||||
|
||||
def forward(self, im, augment=False, visualize=False):
|
||||
# YOLOv5 MultiBackend inference
|
||||
b, ch, h, w = im.shape # batch, channel, height, width
|
||||
if self.fp16 and im.dtype != torch.float16:
|
||||
im = im.half() # to FP16
|
||||
if self.nhwc:
|
||||
im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)
|
||||
|
||||
if self.pt: # PyTorch
|
||||
y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
|
||||
elif self.jit: # TorchScript
|
||||
y = self.model(im)
|
||||
elif self.dnn: # ONNX OpenCV DNN
|
||||
im = im.cpu().numpy() # torch to numpy
|
||||
self.net.setInput(im)
|
||||
y = self.net.forward()
|
||||
elif self.onnx: # ONNX Runtime
|
||||
im = im.cpu().numpy() # torch to numpy
|
||||
y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
|
||||
elif self.xml: # OpenVINO
|
||||
im = im.cpu().numpy() # FP32
|
||||
y = list(self.executable_network([im]).values())
|
||||
elif self.engine: # TensorRT
|
||||
if self.dynamic and im.shape != self.bindings['images'].shape:
|
||||
i = self.model.get_binding_index('images')
|
||||
self.context.set_binding_shape(i, im.shape) # reshape if dynamic
|
||||
self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
|
||||
for name in self.output_names:
|
||||
i = self.model.get_binding_index(name)
|
||||
self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
|
||||
s = self.bindings['images'].shape
|
||||
assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
|
||||
self.binding_addrs['images'] = int(im.data_ptr())
|
||||
self.context.execute_v2(list(self.binding_addrs.values()))
|
||||
y = [self.bindings[x].data for x in sorted(self.output_names)]
|
||||
elif self.coreml: # CoreML
|
||||
im = im.cpu().numpy()
|
||||
im = Image.fromarray((im[0] * 255).astype('uint8'))
|
||||
# im = im.resize((192, 320), Image.ANTIALIAS)
|
||||
y = self.model.predict({'image': im}) # coordinates are xywh normalized
|
||||
if 'confidence' in y:
|
||||
box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels
|
||||
conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float32)  # np.float alias removed in NumPy 1.24+
|
||||
y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
|
||||
else:
|
||||
y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)
|
||||
elif self.paddle: # PaddlePaddle
|
||||
im = im.cpu().numpy().astype(np.float32)
|
||||
self.input_handle.copy_from_cpu(im)
|
||||
self.predictor.run()
|
||||
y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
|
||||
elif self.triton: # NVIDIA Triton Inference Server
|
||||
y = self.model(im)
|
||||
else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
|
||||
im = im.cpu().numpy()
|
||||
if self.saved_model: # SavedModel
|
||||
y = self.model(im, training=False) if self.keras else self.model(im)
|
||||
elif self.pb: # GraphDef
|
||||
y = self.frozen_func(x=self.tf.constant(im))
|
||||
else: # Lite or Edge TPU
|
||||
input = self.input_details[0]
|
||||
int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model
|
||||
if int8:
|
||||
scale, zero_point = input['quantization']
|
||||
im = (im / scale + zero_point).astype(np.uint8) # de-scale
|
||||
self.interpreter.set_tensor(input['index'], im)
|
||||
self.interpreter.invoke()
|
||||
y = []
|
||||
for output in self.output_details:
|
||||
x = self.interpreter.get_tensor(output['index'])
|
||||
if int8:
|
||||
scale, zero_point = output['quantization']
|
||||
x = (x.astype(np.float32) - zero_point) * scale # re-scale
|
||||
y.append(x)
|
||||
y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
|
||||
y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels
|
||||
|
||||
if isinstance(y, (list, tuple)):
|
||||
return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
|
||||
else:
|
||||
return self.from_numpy(y)
|
||||
|
||||
def from_numpy(self, x):
|
||||
return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
|
||||
|
||||
def warmup(self, imgsz=(1, 3, 640, 640)):
|
||||
# Warmup model by running inference once
|
||||
warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
|
||||
if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
|
||||
im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
|
||||
for _ in range(2 if self.jit else 1): #
|
||||
self.forward(im) # warmup
|
||||
|
||||
@staticmethod
|
||||
def _model_type(p='path/to/model.pt'):
|
||||
# Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
|
||||
# types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
|
||||
from export import export_formats
|
||||
from utils.downloads import is_url
|
||||
sf = list(export_formats().Suffix) # export suffixes
|
||||
if not is_url(p, check=False):
|
||||
check_suffix(p, sf) # checks
|
||||
url = urlparse(p) # if url may be Triton inference server
|
||||
types = [s in Path(p).name for s in sf]
|
||||
types[8] &= not types[9] # tflite &= not edgetpu
|
||||
triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])
|
||||
return types + [triton]
|
||||
|
||||
@staticmethod
|
||||
def _load_metadata(f=Path('path/to/meta.yaml')):
|
||||
# Load metadata from meta.yaml if it exists
|
||||
if f.exists():
|
||||
d = yaml_load(f)
|
||||
return d['stride'], d['names'] # assign stride, names
|
||||
return None, None
|
||||
|
||||
|
||||
class AutoShape(nn.Module):
|
||||
# YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
|
||||
conf = 0.25 # NMS confidence threshold
|
||||
iou = 0.45 # NMS IoU threshold
|
||||
agnostic = False # NMS class-agnostic
|
||||
multi_label = False # NMS multiple labels per box
|
||||
classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
|
||||
max_det = 1000 # maximum number of detections per image
|
||||
amp = False # Automatic Mixed Precision (AMP) inference
|
||||
|
||||
def __init__(self, model, verbose=True):
|
||||
super().__init__()
|
||||
if verbose:
|
||||
LOGGER.info('Adding AutoShape... ')
|
||||
copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes
|
||||
self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance
|
||||
self.pt = not self.dmb or model.pt # PyTorch model
|
||||
self.model = model.eval()
|
||||
if self.pt:
|
||||
m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
|
||||
m.inplace = False # Detect.inplace=False for safe multithread inference
|
||||
m.export = True # do not output loss values
|
||||
|
||||
def _apply(self, fn):
|
||||
# Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
|
||||
self = super()._apply(fn)
|
||||
if self.pt:
|
||||
m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
|
||||
m.stride = fn(m.stride)
|
||||
m.grid = list(map(fn, m.grid))
|
||||
if isinstance(m.anchor_grid, list):
|
||||
m.anchor_grid = list(map(fn, m.anchor_grid))
|
||||
return self
|
||||
|
||||
@smart_inference_mode()
|
||||
def forward(self, ims, size=640, augment=False, profile=False):
|
||||
# Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
|
||||
# file: ims = 'data/images/zidane.jpg' # str or PosixPath
|
||||
# URI: = 'https://ultralytics.com/images/zidane.jpg'
|
||||
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
|
||||
# PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
|
||||
# numpy: = np.zeros((640,1280,3)) # HWC
|
||||
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
|
||||
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
|
||||
|
||||
dt = (Profile(), Profile(), Profile())
|
||||
with dt[0]:
|
||||
if isinstance(size, int): # expand
|
||||
size = (size, size)
|
||||
p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param
|
||||
autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference
|
||||
if isinstance(ims, torch.Tensor): # torch
|
||||
with amp.autocast(autocast):
|
||||
return self.model(ims.to(p.device).type_as(p), augment=augment) # inference
|
||||
|
||||
# Pre-process
|
||||
n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images
|
||||
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
|
||||
for i, im in enumerate(ims):
|
||||
f = f'image{i}' # filename
|
||||
if isinstance(im, (str, Path)): # filename or uri
|
||||
im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
|
||||
im = np.asarray(exif_transpose(im))
|
||||
elif isinstance(im, Image.Image): # PIL Image
|
||||
im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
|
||||
files.append(Path(f).with_suffix('.jpg').name)
|
||||
if im.shape[0] < 5: # image in CHW
|
||||
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
|
||||
im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input
|
||||
s = im.shape[:2] # HWC
|
||||
shape0.append(s) # image shape
|
||||
g = max(size) / max(s) # gain
|
||||
shape1.append([int(y * g) for y in s])
|
||||
ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
|
||||
shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] # inf shape
|
||||
x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad
|
||||
x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
|
||||
x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32
|
||||
|
||||
with amp.autocast(autocast):
|
||||
# Inference
|
||||
with dt[1]:
|
||||
y = self.model(x, augment=augment) # forward
|
||||
|
||||
# Post-process
|
||||
with dt[2]:
|
||||
y = non_max_suppression(y if self.dmb else y[0],
|
||||
self.conf,
|
||||
self.iou,
|
||||
self.classes,
|
||||
self.agnostic,
|
||||
self.multi_label,
|
||||
max_det=self.max_det) # NMS
|
||||
for i in range(n):
|
||||
scale_boxes(shape1, y[i][:, :4], shape0[i])
|
||||
|
||||
return Detections(ims, y, files, dt, self.names, x.shape)
|
||||
|
||||
|
||||
class Detections:
|
||||
# YOLOv5 detections class for inference results
|
||||
def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
|
||||
super().__init__()
|
||||
d = pred[0].device # device
|
||||
gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations
|
||||
self.ims = ims # list of images as numpy arrays
|
||||
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
|
||||
self.names = names # class names
|
||||
self.files = files # image filenames
|
||||
self.times = times # profiling times
|
||||
self.xyxy = pred # xyxy pixels
|
||||
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
|
||||
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
|
||||
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
|
||||
self.n = len(self.pred) # number of images (batch size)
|
||||
self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms)
|
||||
self.s = tuple(shape) # inference BCHW shape
|
||||
|
||||
def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
|
||||
s, crops = '', []
|
||||
for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
|
||||
s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
|
||||
if pred.shape[0]:
|
||||
for c in pred[:, -1].unique():
|
||||
n = (pred[:, -1] == c).sum() # detections per class
|
||||
s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
|
||||
s = s.rstrip(', ')
|
||||
if show or save or render or crop:
|
||||
annotator = Annotator(im, example=str(self.names))
|
||||
for *box, conf, cls in reversed(pred): # xyxy, confidence, class
|
||||
label = f'{self.names[int(cls)]} {conf:.2f}'
|
||||
if crop:
|
||||
file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
|
||||
crops.append({
|
||||
'box': box,
|
||||
'conf': conf,
|
||||
'cls': cls,
|
||||
'label': label,
|
||||
'im': save_one_box(box, im, file=file, save=save)})
|
||||
else: # all others
|
||||
annotator.box_label(box, label if labels else '', color=colors(cls))
|
||||
im = annotator.im
|
||||
else:
|
||||
s += '(no detections)'
|
||||
|
||||
im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
|
||||
if show:
|
||||
if is_jupyter():
|
||||
from IPython.display import display
|
||||
display(im)
|
||||
else:
|
||||
im.show(self.files[i])
|
||||
if save:
|
||||
f = self.files[i]
|
||||
im.save(save_dir / f) # save
|
||||
if i == self.n - 1:
|
||||
LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
|
||||
if render:
|
||||
self.ims[i] = np.asarray(im)
|
||||
if pprint:
|
||||
s = s.lstrip('\n')
|
||||
return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
|
||||
if crop:
|
||||
if save:
|
||||
LOGGER.info(f'Saved results to {save_dir}\n')
|
||||
return crops
|
||||
|
||||
@TryExcept('Showing images is not supported in this environment')
|
||||
def show(self, labels=True):
|
||||
self._run(show=True, labels=labels) # show results
|
||||
|
||||
def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
|
||||
save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir
|
||||
self._run(save=True, labels=labels, save_dir=save_dir) # save results
|
||||
|
||||
def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
|
||||
save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
|
||||
return self._run(crop=True, save=save, save_dir=save_dir) # crop results
|
||||
|
||||
def render(self, labels=True):
|
||||
self._run(render=True, labels=labels) # render results
|
||||
return self.ims
|
||||
|
||||
def pandas(self):
|
||||
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
|
||||
new = copy(self) # return copy
|
||||
ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
|
||||
cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
|
||||
for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
|
||||
a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
|
||||
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
|
||||
return new
|
||||
|
||||
def tolist(self):
|
||||
# return a list of Detections objects, i.e. 'for result in results.tolist():'
|
||||
r = range(self.n) # iterable
|
||||
x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
|
||||
# for d in x:
|
||||
# for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
|
||||
# setattr(d, k, getattr(d, k)[0]) # pop out of list
|
||||
return x
|
||||
|
||||
def print(self):
|
||||
LOGGER.info(self.__str__())
|
||||
|
||||
def __len__(self): # override len(results)
|
||||
return self.n
|
||||
|
||||
def __str__(self): # override print(results)
|
||||
return self._run(pprint=True) # print results
|
||||
|
||||
def __repr__(self):
|
||||
return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
|
||||
|
||||
|
||||
class Proto(nn.Module):
|
||||
# YOLOv5 mask Proto module for segmentation models
|
||||
def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks
|
||||
super().__init__()
|
||||
self.cv1 = Conv(c1, c_, k=3)
|
||||
self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
|
||||
self.cv2 = Conv(c_, c_, k=3)
|
||||
self.cv3 = Conv(c_, c2)
|
||||
|
||||
def forward(self, x):
|
||||
return self.cv3(self.cv2(self.upsample(self.cv1(x))))
|
||||
|
||||
|
||||
class Classify(nn.Module):
|
||||
# YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
|
||||
def __init__(self,
|
||||
c1,
|
||||
c2,
|
||||
k=1,
|
||||
s=1,
|
||||
p=None,
|
||||
g=1,
|
||||
dropout_p=0.0): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
|
||||
super().__init__()
|
||||
c_ = 1280 # efficientnet_b0 size
|
||||
self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
|
||||
self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1)
|
||||
self.drop = nn.Dropout(p=dropout_p, inplace=True)
|
||||
self.linear = nn.Linear(c_, c2) # to x(b,c2)
|
||||
|
||||
def forward(self, x):
|
||||
if isinstance(x, list):
|
||||
x = torch.cat(x, 1)
|
||||
return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
|
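The DetectMultiBackend / AutoShape / Detections classes above are normally used together. A minimal usage sketch under the repo layout in this change; the weights file 'yolov5s.pt' and the image path are placeholders, not part of the diff:

import torch
from models.common import DetectMultiBackend, AutoShape

backend = DetectMultiBackend('yolov5s.pt', device=torch.device('cpu'))  # any supported export format
model = AutoShape(backend)                    # adds pre-processing, inference and NMS around the backend
results = model('data/images/zidane.jpg', size=640)
results.print()                               # per-image summary string built by Detections._run
df = results.pandas().xyxy[0]                 # xmin, ymin, xmax, ymax, confidence, class, name
print(df.head())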
ayf_ocr/yolo_databases/api/models/experimental.py (new file)
@ -0,0 +1,111 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Experimental modules
"""
import math

import numpy as np
import torch
import torch.nn as nn

from utils.downloads import attempt_download


class Sum(nn.Module):
    # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
    def __init__(self, n, weight=False):  # n: number of inputs
        super().__init__()
        self.weight = weight  # apply weights boolean
        self.iter = range(n - 1)  # iter object
        if weight:
            self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights

    def forward(self, x):
        y = x[0]  # no weight
        if self.weight:
            w = torch.sigmoid(self.w) * 2
            for i in self.iter:
                y = y + x[i + 1] * w[i]
        else:
            for i in self.iter:
                y = y + x[i + 1]
        return y


class MixConv2d(nn.Module):
    # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
        super().__init__()
        n = len(k)  # number of convolutions
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * n
            a = np.eye(n + 1, n, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList([
            nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU()

    def forward(self, x):
        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))


class Ensemble(nn.ModuleList):
    # Ensemble of models
    def __init__(self):
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        y = [module(x, augment, profile, visualize)[0] for module in self]
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.stack(y).mean(0)  # mean ensemble
        y = torch.cat(y, 1)  # nms ensemble
        return y, None  # inference, train output


def attempt_load(weights, device=None, inplace=True, fuse=True):
    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
    from models.yolo import Detect, Model

    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model

        # Model compatibility updates
        if not hasattr(ckpt, 'stride'):
            ckpt.stride = torch.tensor([32.])
        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict

        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode

    # Module compatibility updates
    for m in model.modules():
        t = type(m)
        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
            m.inplace = inplace  # torch 1.7.0 compatibility
        if t is Detect and not isinstance(m.anchor_grid, list):
            delattr(m, 'anchor_grid')
            setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

    # Return model
    if len(model) == 1:
        return model[-1]

    # Return detection ensemble
    print(f'Ensemble created with {weights}\n')
    for k in 'names', 'nc', 'yaml':
        setattr(model, k, getattr(model[0], k))
    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
    return model
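A short, hedged sketch of how attempt_load above is called; the checkpoint names are placeholders for whatever weights the project actually trains or downloads:

import torch
from models.experimental import attempt_load

model = attempt_load('yolov5s.pt', device=torch.device('cpu'), inplace=True, fuse=True)
print(model.stride, model.names)   # stride tensor and class-name dict patched in by the loader

# Passing a list of checkpoints returns an Ensemble whose per-model outputs are concatenated for NMS:
# ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'], device=torch.device('cpu'))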
ayf_ocr/yolo_databases/api/models/hub/anchors.yaml (new file)
@ -0,0 +1,59 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
# Default anchors for COCO data


# P5 -------------------------------------------------------------------------------------------------------------------
# P5-640:
anchors_p5_640:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32


# P6 -------------------------------------------------------------------------------------------------------------------
# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
anchors_p6_640:
  - [9,11, 21,19, 17,41]  # P3/8
  - [43,32, 39,70, 86,64]  # P4/16
  - [65,131, 134,130, 120,265]  # P5/32
  - [282,180, 247,354, 512,387]  # P6/64

# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
anchors_p6_1280:
  - [19,27, 44,40, 38,94]  # P3/8
  - [96,68, 86,152, 180,137]  # P4/16
  - [140,301, 303,264, 238,542]  # P5/32
  - [436,615, 739,380, 925,792]  # P6/64

# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
anchors_p6_1920:
  - [28,41, 67,59, 57,141]  # P3/8
  - [144,103, 129,227, 270,205]  # P4/16
  - [209,452, 455,396, 358,812]  # P5/32
  - [653,922, 1109,570, 1387,1187]  # P6/64


# P7 -------------------------------------------------------------------------------------------------------------------
# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
anchors_p7_640:
  - [11,11, 13,30, 29,20]  # P3/8
  - [30,46, 61,38, 39,92]  # P4/16
  - [78,80, 146,66, 79,163]  # P5/32
  - [149,150, 321,143, 157,303]  # P6/64
  - [257,402, 359,290, 524,372]  # P7/128

# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
anchors_p7_1280:
  - [19,22, 54,36, 32,77]  # P3/8
  - [70,83, 138,71, 75,173]  # P4/16
  - [165,159, 148,334, 375,151]  # P5/32
  - [334,317, 251,626, 499,474]  # P6/64
  - [750,326, 534,814, 1079,818]  # P7/128

# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
anchors_p7_1920:
  - [29,34, 81,55, 47,115]  # P3/8
  - [105,124, 207,107, 113,259]  # P4/16
  - [247,238, 222,500, 563,227]  # P5/32
  - [501,476, 376,939, 749,711]  # P6/64
  - [1126,489, 801,1222, 1618,1227]  # P7/128
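Each key in this file holds one anchor set: one row per detection layer, with three width,height pairs per row. A hedged sketch of reading a set and shaping it the way a Detect head expects, i.e. (nl layers, na anchors, 2); the file path assumes the repo layout above:

import yaml
import torch

with open('models/hub/anchors.yaml', encoding='utf-8') as f:
    anchor_sets = yaml.safe_load(f)

a = torch.tensor(anchor_sets['anchors_p6_640'], dtype=torch.float32)  # shape (4, 6)
nl, na = a.shape[0], a.shape[1] // 2
anchors = a.view(nl, na, 2)        # one (w, h) pair per anchor
print(nl, na, anchors[0])          # 4 output layers, 3 anchors each at P6-640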
ayf_ocr/yolo_databases/api/models/hub/yolov3-spp.yaml (new file)
@ -0,0 +1,51 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# darknet53 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [32, 3, 1]],  # 0
   [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
   [-1, 1, Bottleneck, [64]],
   [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
   [-1, 2, Bottleneck, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
   [-1, 8, Bottleneck, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
   [-1, 8, Bottleneck, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
   [-1, 4, Bottleneck, [1024]],  # 10
  ]

# YOLOv3-SPP head
head:
  [[-1, 1, Bottleneck, [1024, False]],
   [-1, 1, SPP, [512, [5, 9, 13]]],
   [-1, 1, Conv, [1024, 3, 1]],
   [-1, 1, Conv, [512, 1, 1]],
   [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)

   [-2, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 8], 1, Concat, [1]],  # cat backbone P4
   [-1, 1, Bottleneck, [512, False]],
   [-1, 1, Bottleneck, [512, False]],
   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)

   [-2, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P3
   [-1, 1, Bottleneck, [256, False]],
   [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)

   [[27, 22, 15], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
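Every model yaml in this change follows the same row format: [from, number, module, args], where 'from' indexes earlier layer outputs (-1 is the previous layer, a list means a concatenation of several), 'number' is the repeat count, and 'module'/'args' name the block to build. The authoritative parsing lives in models/yolo.py; the sketch below only loads a config and prints how each row would be read, with the file path assumed from the repo layout above:

import yaml

with open('models/hub/yolov3-spp.yaml', encoding='utf-8') as f:
    cfg = yaml.safe_load(f)

for i, (f_, n, m, args) in enumerate(cfg['backbone'] + cfg['head']):
    # f_: input layer index/indices, n: repeats, m: module name string, args: constructor args
    print(f'layer {i:2d}: from={f_!s:>12}  repeats={n}  module={m}  args={args}')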
41
ayf_ocr/yolo_databases/api/models/hub/yolov3-tiny.yaml
Normal file
@ -0,0 +1,41 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors:
|
||||
- [10,14, 23,27, 37,58] # P4/16
|
||||
- [81,82, 135,169, 344,319] # P5/32
|
||||
|
||||
# YOLOv3-tiny backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [16, 3, 1]], # 0
|
||||
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
|
||||
[-1, 1, Conv, [32, 3, 1]],
|
||||
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
|
||||
[-1, 1, Conv, [64, 3, 1]],
|
||||
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
|
||||
[-1, 1, Conv, [128, 3, 1]],
|
||||
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
|
||||
[-1, 1, Conv, [256, 3, 1]],
|
||||
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
|
||||
[-1, 1, Conv, [512, 3, 1]],
|
||||
[-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
|
||||
[-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
|
||||
]
|
||||
|
||||
# YOLOv3-tiny head
|
||||
head:
|
||||
[[-1, 1, Conv, [1024, 3, 1]],
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
|
||||
|
||||
[-2, 1, Conv, [128, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 8], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
|
||||
|
||||
[[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
|
||||
]
|
51
ayf_ocr/yolo_databases/api/models/hub/yolov3.yaml
Normal file
@ -0,0 +1,51 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# darknet53 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [32, 3, 1]], # 0
|
||||
[-1, 1, Conv, [64, 3, 2]], # 1-P1/2
|
||||
[-1, 1, Bottleneck, [64]],
|
||||
[-1, 1, Conv, [128, 3, 2]], # 3-P2/4
|
||||
[-1, 2, Bottleneck, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 5-P3/8
|
||||
[-1, 8, Bottleneck, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 7-P4/16
|
||||
[-1, 8, Bottleneck, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
|
||||
[-1, 4, Bottleneck, [1024]], # 10
|
||||
]
|
||||
|
||||
# YOLOv3 head
|
||||
head:
|
||||
[[-1, 1, Bottleneck, [1024, False]],
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, Conv, [1024, 3, 1]],
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
|
||||
|
||||
[-2, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 8], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 1, Bottleneck, [512, False]],
|
||||
[-1, 1, Bottleneck, [512, False]],
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
|
||||
|
||||
[-2, 1, Conv, [128, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 1, Bottleneck, [256, False]],
|
||||
[-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
|
||||
|
||||
[[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
48
ayf_ocr/yolo_databases/api/models/hub/yolov5-bifpn.yaml
Normal file
@ -0,0 +1,48 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 BiFPN head
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change
|
||||
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
42
ayf_ocr/yolo_databases/api/models/hub/yolov5-fpn.yaml
Normal file
@ -0,0 +1,42 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 FPN head
|
||||
head:
|
||||
[[-1, 3, C3, [1024, False]], # 10 (P5/32-large)
|
||||
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 3, C3, [512, False]], # 14 (P4/16-medium)
|
||||
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 3, C3, [256, False]], # 18 (P3/8-small)
|
||||
|
||||
[[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
54
ayf_ocr/yolo_databases/api/models/hub/yolov5-p2.yaml
Normal file
@ -0,0 +1,54 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [128, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 2], 1, Concat, [1]], # cat backbone P2
|
||||
[-1, 1, C3, [128, False]], # 21 (P2/4-xsmall)
|
||||
|
||||
[-1, 1, Conv, [128, 3, 2]],
|
||||
[[-1, 18], 1, Concat, [1]], # cat head P3
|
||||
[-1, 3, C3, [256, False]], # 24 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 27 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 30 (P5/32-large)
|
||||
|
||||
[[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5)
|
||||
]
|
41
ayf_ocr/yolo_databases/api/models/hub/yolov5-p34.yaml
Normal file
@ -0,0 +1,41 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 0.33 # model depth multiple
|
||||
width_multiple: 0.50 # layer channel multiple
|
||||
anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2
|
||||
[ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
|
||||
[ -1, 3, C3, [ 128 ] ],
|
||||
[ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
|
||||
[ -1, 6, C3, [ 256 ] ],
|
||||
[ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
|
||||
[ -1, 9, C3, [ 512 ] ],
|
||||
[ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
|
||||
[ -1, 3, C3, [ 1024 ] ],
|
||||
[ -1, 1, SPPF, [ 1024, 5 ] ], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head with (P3, P4) outputs
|
||||
head:
|
||||
[ [ -1, 1, Conv, [ 512, 1, 1 ] ],
|
||||
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
|
||||
[ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
|
||||
[ -1, 3, C3, [ 512, False ] ], # 13
|
||||
|
||||
[ -1, 1, Conv, [ 256, 1, 1 ] ],
|
||||
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
|
||||
[ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
|
||||
[ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)
|
||||
|
||||
[ -1, 1, Conv, [ 256, 3, 2 ] ],
|
||||
[ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
|
||||
[ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium)
|
||||
|
||||
[ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4)
|
||||
]
|
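The depth_multiple / width_multiple parameters in the config above (0.33 and 0.50 for this P3/P4 variant) scale the repeat counts and channel widths of every row before the modules are built. A standalone arithmetic sketch of that scaling, mirroring the rounding YOLOv5's parse_model applies to a row such as [-1, 9, C3, [512]]; make_divisible here is a local stand-in, not imported from the repo:

import math

def make_divisible(x, divisor=8):
    # round a channel count up to the nearest multiple of 'divisor'
    return math.ceil(x / divisor) * divisor

gd, gw = 0.33, 0.50                    # depth_multiple, width_multiple from the yaml above
n, c2 = 9, 512                         # 'number' and channel argument of the example row
repeats = max(round(n * gd), 1)        # 9 -> 3 stacked C3 bottlenecks
channels = make_divisible(c2 * gw, 8)  # 512 -> 256 output channels
print(repeats, channels)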
56
ayf_ocr/yolo_databases/api/models/hub/yolov5-p6.yaml
Normal file
@ -0,0 +1,56 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [768, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [768]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 11
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
|
||||
head:
|
||||
[[-1, 1, Conv, [768, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 8], 1, Concat, [1]], # cat backbone P5
|
||||
[-1, 3, C3, [768, False]], # 15
|
||||
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 19
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 23 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 20], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 26 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 16], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [768, False]], # 29 (P5/32-large)
|
||||
|
||||
[-1, 1, Conv, [768, 3, 2]],
|
||||
[[-1, 12], 1, Concat, [1]], # cat head P6
|
||||
[-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
|
||||
|
||||
[[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
|
||||
]
|
67
ayf_ocr/yolo_databases/api/models/hub/yolov5-p7.yaml
Normal file
@ -0,0 +1,67 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [768, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [768]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, Conv, [1280, 3, 2]], # 11-P7/128
|
||||
[-1, 3, C3, [1280]],
|
||||
[-1, 1, SPPF, [1280, 5]], # 13
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
|
||||
head:
|
||||
[[-1, 1, Conv, [1024, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 10], 1, Concat, [1]], # cat backbone P6
|
||||
[-1, 3, C3, [1024, False]], # 17
|
||||
|
||||
[-1, 1, Conv, [768, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 8], 1, Concat, [1]], # cat backbone P5
|
||||
[-1, 3, C3, [768, False]], # 21
|
||||
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 25
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 29 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 26], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 32 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 22], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [768, False]], # 35 (P5/32-large)
|
||||
|
||||
[-1, 1, Conv, [768, 3, 2]],
|
||||
[[-1, 18], 1, Concat, [1]], # cat head P6
|
||||
[-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge)
|
||||
|
||||
[-1, 1, Conv, [1024, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P7
|
||||
[-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge)
|
||||
|
||||
[[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7)
|
||||
]
|
48
ayf_ocr/yolo_databases/api/models/hub/yolov5-panet.yaml
Normal file
@ -0,0 +1,48 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 PANet head
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
60
ayf_ocr/yolo_databases/api/models/hub/yolov5l6.yaml
Normal file
@ -0,0 +1,60 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors:
|
||||
- [19,27, 44,40, 38,94] # P3/8
|
||||
- [96,68, 86,152, 180,137] # P4/16
|
||||
- [140,301, 303,264, 238,542] # P5/32
|
||||
- [436,615, 739,380, 925,792] # P6/64
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [768, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [768]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 11
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head
|
||||
head:
|
||||
[[-1, 1, Conv, [768, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 8], 1, Concat, [1]], # cat backbone P5
|
||||
[-1, 3, C3, [768, False]], # 15
|
||||
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 19
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 23 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 20], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 26 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 16], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [768, False]], # 29 (P5/32-large)
|
||||
|
||||
[-1, 1, Conv, [768, 3, 2]],
|
||||
[[-1, 12], 1, Concat, [1]], # cat head P6
|
||||
[-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
|
||||
|
||||
[[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
|
||||
]
|
60
ayf_ocr/yolo_databases/api/models/hub/yolov5m6.yaml
Normal file
@ -0,0 +1,60 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 0.67 # model depth multiple
|
||||
width_multiple: 0.75 # layer channel multiple
|
||||
anchors:
|
||||
- [19,27, 44,40, 38,94] # P3/8
|
||||
- [96,68, 86,152, 180,137] # P4/16
|
||||
- [140,301, 303,264, 238,542] # P5/32
|
||||
- [436,615, 739,380, 925,792] # P6/64
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [768, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [768]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 11
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head
|
||||
head:
|
||||
[[-1, 1, Conv, [768, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 8], 1, Concat, [1]], # cat backbone P5
|
||||
[-1, 3, C3, [768, False]], # 15
|
||||
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 19
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 23 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 20], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 26 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 16], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [768, False]], # 29 (P5/32-large)
|
||||
|
||||
[-1, 1, Conv, [768, 3, 2]],
|
||||
[[-1, 12], 1, Concat, [1]], # cat head P6
|
||||
[-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
|
||||
|
||||
[[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
|
||||
]
|
60
ayf_ocr/yolo_databases/api/models/hub/yolov5n6.yaml
Normal file
@ -0,0 +1,60 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 0.33 # model depth multiple
|
||||
width_multiple: 0.25 # layer channel multiple
|
||||
anchors:
|
||||
- [19,27, 44,40, 38,94] # P3/8
|
||||
- [96,68, 86,152, 180,137] # P4/16
|
||||
- [140,301, 303,264, 238,542] # P5/32
|
||||
- [436,615, 739,380, 925,792] # P6/64
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [768, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [768]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 11
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head
|
||||
head:
|
||||
[[-1, 1, Conv, [768, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 8], 1, Concat, [1]], # cat backbone P5
|
||||
[-1, 3, C3, [768, False]], # 15
|
||||
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 19
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 23 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 20], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 26 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 16], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [768, False]], # 29 (P5/32-large)
|
||||
|
||||
[-1, 1, Conv, [768, 3, 2]],
|
||||
[[-1, 12], 1, Concat, [1]], # cat head P6
|
||||
[-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
|
||||
|
||||
[[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
|
||||
]
|
49
ayf_ocr/yolo_databases/api/models/hub/yolov5s-LeakyReLU.yaml
Normal file
@ -0,0 +1,49 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model
|
||||
depth_multiple: 0.33 # model depth multiple
|
||||
width_multiple: 0.50 # layer channel multiple
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
48
ayf_ocr/yolo_databases/api/models/hub/yolov5s-ghost.yaml
Normal file
@ -0,0 +1,48 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 0.33 # model depth multiple
|
||||
width_multiple: 0.50 # layer channel multiple
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3Ghost, [128]],
|
||||
[-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3Ghost, [256]],
|
||||
[-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3Ghost, [512]],
|
||||
[-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3Ghost, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head
|
||||
head:
|
||||
[[-1, 1, GhostConv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3Ghost, [512, False]], # 13
|
||||
|
||||
[-1, 1, GhostConv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, GhostConv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, GhostConv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
@ -0,0 +1,48 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 0.33 # model depth multiple
|
||||
width_multiple: 0.50 # layer channel multiple
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module
|
||||
[-1, 1, SPPF, [1024, 5]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
60
ayf_ocr/yolo_databases/api/models/hub/yolov5s6.yaml
Normal file
@ -0,0 +1,60 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 0.33 # model depth multiple
|
||||
width_multiple: 0.50 # layer channel multiple
|
||||
anchors:
|
||||
- [19,27, 44,40, 38,94] # P3/8
|
||||
- [96,68, 86,152, 180,137] # P4/16
|
||||
- [140,301, 303,264, 238,542] # P5/32
|
||||
- [436,615, 739,380, 925,792] # P6/64
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [768, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [768]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 11
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head
|
||||
head:
|
||||
[[-1, 1, Conv, [768, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 8], 1, Concat, [1]], # cat backbone P5
|
||||
[-1, 3, C3, [768, False]], # 15
|
||||
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 19
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 23 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 20], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 26 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 16], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [768, False]], # 29 (P5/32-large)
|
||||
|
||||
[-1, 1, Conv, [768, 3, 2]],
|
||||
[[-1, 12], 1, Concat, [1]], # cat head P6
|
||||
[-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
|
||||
|
||||
[[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
|
||||
]
|
60
ayf_ocr/yolo_databases/api/models/hub/yolov5x6.yaml
Normal file
@ -0,0 +1,60 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.33 # model depth multiple
|
||||
width_multiple: 1.25 # layer channel multiple
|
||||
anchors:
|
||||
- [19,27, 44,40, 38,94] # P3/8
|
||||
- [96,68, 86,152, 180,137] # P4/16
|
||||
- [140,301, 303,264, 238,542] # P5/32
|
||||
- [436,615, 739,380, 925,792] # P6/64
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [768, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [768]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 11
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head
|
||||
head:
|
||||
[[-1, 1, Conv, [768, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 8], 1, Concat, [1]], # cat backbone P5
|
||||
[-1, 3, C3, [768, False]], # 15
|
||||
|
||||
[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 19
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 23 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 20], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 26 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 16], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [768, False]], # 29 (P5/32-large)
|
||||
|
||||
[-1, 1, Conv, [768, 3, 2]],
|
||||
[[-1, 12], 1, Concat, [1]], # cat head P6
|
||||
[-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
|
||||
|
||||
[[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
|
||||
]
|
48
ayf_ocr/yolo_databases/api/models/segment/yolov5l-seg.yaml
Normal file
@ -0,0 +1,48 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 v6.0 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 6, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 3, C3, [1024]],
|
||||
[-1, 1, SPPF, [1024, 5]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 v6.0 head
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5)
|
||||
]
|
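The only difference between this segmentation config and the plain detection one is the final row: Segment receives two extra arguments, 32 mask coefficients per detection and 256 proto channels, which pair with the Proto module added earlier in common.py. A hedged sketch of what that pairing means downstream; the tensors are random stand-ins, not real model outputs:

import torch

nm, h, w = 32, 160, 160
proto = torch.randn(nm, h, w)    # prototype maps produced by Proto() for one image
coeff = torch.randn(5, nm)       # 32 mask coefficients carried by each of 5 detections

# Each instance mask is a sigmoid of a linear combination of the prototype maps.
masks = (coeff @ proto.view(nm, -1)).sigmoid().view(-1, h, w)
print(masks.shape)               # (5, 160, 160) soft masks, later cropped to each box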
ayf_ocr/yolo_databases/api/models/segment/yolov5m-seg.yaml (new file)
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.67  # model depth multiple
width_multiple: 0.75  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
  ]

ayf_ocr/yolo_databases/api/models/segment/yolov5n-seg.yaml (new file)
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.25  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
  ]

ayf_ocr/yolo_databases/api/models/segment/yolov5s-seg.yaml (new file)
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.5  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
  ]

ayf_ocr/yolo_databases/api/models/segment/yolov5x-seg.yaml (new file)
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 1.33  # model depth multiple
width_multiple: 1.25  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
  ]

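The five segmentation configs above differ only in their depth_multiple/width_multiple scaling; the trailing Segment head arguments [nc, anchors, 32, 256] are the number of mask coefficients (32) and proto channels (256) consumed by the Segment class in ayf_ocr/yolo_databases/api/models/yolo.py further down in this diff. A minimal sketch of building one of these models follows; the repo-relative config path is an assumption about the final on-disk layout, not part of the diff.

# Sketch only, not part of the diff; the cfg path is an assumed layout.
import torch
from models.yolo import SegmentationModel  # defined in models/yolo.py below

seg_model = SegmentationModel(cfg='models/segment/yolov5s-seg.yaml', ch=3, nc=80)
preds, protos = seg_model(torch.zeros(1, 3, 640, 640))  # training mode: per-scale predictions + mask protos
print(seg_model.stride)                                 # strides computed during build: tensor([ 8., 16., 32.])
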
ayf_ocr/yolo_databases/api/models/tf.py (new file)
@@ -0,0 +1,608 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
TensorFlow, Keras and TFLite versions of YOLOv5
|
||||
Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
|
||||
|
||||
Usage:
|
||||
$ python models/tf.py --weights yolov5s.pt
|
||||
|
||||
Export:
|
||||
$ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from copy import deepcopy
|
||||
from pathlib import Path
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
# ROOT = ROOT.relative_to(Path.cwd()) # relative
|
||||
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from tensorflow import keras
|
||||
|
||||
from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,
|
||||
DWConvTranspose2d, Focus, autopad)
|
||||
from models.experimental import MixConv2d, attempt_load
|
||||
from models.yolo import Detect, Segment
|
||||
from utils.activations import SiLU
|
||||
from utils.general import LOGGER, make_divisible, print_args
|
||||
|
||||
|
||||
class TFBN(keras.layers.Layer):
|
||||
# TensorFlow BatchNormalization wrapper
|
||||
def __init__(self, w=None):
|
||||
super().__init__()
|
||||
self.bn = keras.layers.BatchNormalization(
|
||||
beta_initializer=keras.initializers.Constant(w.bias.numpy()),
|
||||
gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
|
||||
moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
|
||||
moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
|
||||
epsilon=w.eps)
|
||||
|
||||
def call(self, inputs):
|
||||
return self.bn(inputs)
|
||||
|
||||
|
||||
class TFPad(keras.layers.Layer):
|
||||
# Pad inputs in spatial dimensions 1 and 2
|
||||
def __init__(self, pad):
|
||||
super().__init__()
|
||||
if isinstance(pad, int):
|
||||
self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
|
||||
else: # tuple/list
|
||||
self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])
|
||||
|
||||
def call(self, inputs):
|
||||
return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
|
||||
|
||||
|
||||
class TFConv(keras.layers.Layer):
|
||||
# Standard convolution
|
||||
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
|
||||
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
||||
super().__init__()
|
||||
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
|
||||
# TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
|
||||
# see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
|
||||
conv = keras.layers.Conv2D(
|
||||
filters=c2,
|
||||
kernel_size=k,
|
||||
strides=s,
|
||||
padding='SAME' if s == 1 else 'VALID',
|
||||
use_bias=not hasattr(w, 'bn'),
|
||||
kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
|
||||
bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
|
||||
self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
|
||||
self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
|
||||
self.act = activations(w.act) if act else tf.identity
|
||||
|
||||
def call(self, inputs):
|
||||
return self.act(self.bn(self.conv(inputs)))
|
||||
|
||||
|
||||
class TFDWConv(keras.layers.Layer):
|
||||
# Depthwise convolution
|
||||
def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
|
||||
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
||||
super().__init__()
|
||||
assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels'
|
||||
conv = keras.layers.DepthwiseConv2D(
|
||||
kernel_size=k,
|
||||
depth_multiplier=c2 // c1,
|
||||
strides=s,
|
||||
padding='SAME' if s == 1 else 'VALID',
|
||||
use_bias=not hasattr(w, 'bn'),
|
||||
depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
|
||||
bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
|
||||
self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
|
||||
self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
|
||||
self.act = activations(w.act) if act else tf.identity
|
||||
|
||||
def call(self, inputs):
|
||||
return self.act(self.bn(self.conv(inputs)))
|
||||
|
||||
|
||||
class TFDWConvTranspose2d(keras.layers.Layer):
|
||||
# Depthwise ConvTranspose2d
|
||||
def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
|
||||
# ch_in, ch_out, weights, kernel, stride, padding, groups
|
||||
super().__init__()
|
||||
assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels'
|
||||
assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1'
|
||||
weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()
|
||||
self.c1 = c1
|
||||
self.conv = [
|
||||
keras.layers.Conv2DTranspose(filters=1,
|
||||
kernel_size=k,
|
||||
strides=s,
|
||||
padding='VALID',
|
||||
output_padding=p2,
|
||||
use_bias=True,
|
||||
kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]),
|
||||
bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)]
|
||||
|
||||
def call(self, inputs):
|
||||
return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]
|
||||
|
||||
|
||||
class TFFocus(keras.layers.Layer):
|
||||
# Focus wh information into c-space
|
||||
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
|
||||
# ch_in, ch_out, kernel, stride, padding, groups
|
||||
super().__init__()
|
||||
self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
|
||||
|
||||
def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
|
||||
# inputs = inputs / 255 # normalize 0-255 to 0-1
|
||||
inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]
|
||||
return self.conv(tf.concat(inputs, 3))
|
||||
|
||||
|
||||
class TFBottleneck(keras.layers.Layer):
|
||||
# Standard bottleneck
|
||||
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion
|
||||
super().__init__()
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
||||
self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
|
||||
self.add = shortcut and c1 == c2
|
||||
|
||||
def call(self, inputs):
|
||||
return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
|
||||
|
||||
|
||||
class TFCrossConv(keras.layers.Layer):
|
||||
# Cross Convolution
|
||||
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
|
||||
super().__init__()
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)
|
||||
self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)
|
||||
self.add = shortcut and c1 == c2
|
||||
|
||||
def call(self, inputs):
|
||||
return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
|
||||
|
||||
|
||||
class TFConv2d(keras.layers.Layer):
|
||||
# Substitution for PyTorch nn.Conv2D
|
||||
def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
|
||||
super().__init__()
|
||||
assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
|
||||
self.conv = keras.layers.Conv2D(filters=c2,
|
||||
kernel_size=k,
|
||||
strides=s,
|
||||
padding='VALID',
|
||||
use_bias=bias,
|
||||
kernel_initializer=keras.initializers.Constant(
|
||||
w.weight.permute(2, 3, 1, 0).numpy()),
|
||||
bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)
|
||||
|
||||
def call(self, inputs):
|
||||
return self.conv(inputs)
|
||||
|
||||
|
||||
class TFBottleneckCSP(keras.layers.Layer):
|
||||
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
|
||||
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
|
||||
# ch_in, ch_out, number, shortcut, groups, expansion
|
||||
super().__init__()
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
||||
self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
|
||||
self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
|
||||
self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
|
||||
self.bn = TFBN(w.bn)
|
||||
self.act = lambda x: keras.activations.swish(x)
|
||||
self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
|
||||
|
||||
def call(self, inputs):
|
||||
y1 = self.cv3(self.m(self.cv1(inputs)))
|
||||
y2 = self.cv2(inputs)
|
||||
return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))
|
||||
|
||||
|
||||
class TFC3(keras.layers.Layer):
|
||||
# CSP Bottleneck with 3 convolutions
|
||||
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
|
||||
# ch_in, ch_out, number, shortcut, groups, expansion
|
||||
super().__init__()
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
||||
self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
|
||||
self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
|
||||
self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
|
||||
|
||||
def call(self, inputs):
|
||||
return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
|
||||
|
||||
|
||||
class TFC3x(keras.layers.Layer):
|
||||
# 3 module with cross-convolutions
|
||||
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
|
||||
# ch_in, ch_out, number, shortcut, groups, expansion
|
||||
super().__init__()
|
||||
c_ = int(c2 * e) # hidden channels
|
||||
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
||||
self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
|
||||
self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
|
||||
self.m = keras.Sequential([
|
||||
TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)])
|
||||
|
||||
def call(self, inputs):
|
||||
return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
|
||||
|
||||
|
||||
class TFSPP(keras.layers.Layer):
|
||||
# Spatial pyramid pooling layer used in YOLOv3-SPP
|
||||
def __init__(self, c1, c2, k=(5, 9, 13), w=None):
|
||||
super().__init__()
|
||||
c_ = c1 // 2 # hidden channels
|
||||
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
||||
self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
|
||||
self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.cv1(inputs)
|
||||
return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))
|
||||
|
||||
|
||||
class TFSPPF(keras.layers.Layer):
|
||||
# Spatial pyramid pooling-Fast layer
|
||||
def __init__(self, c1, c2, k=5, w=None):
|
||||
super().__init__()
|
||||
c_ = c1 // 2 # hidden channels
|
||||
self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
|
||||
self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
|
||||
self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')
|
||||
|
||||
def call(self, inputs):
|
||||
x = self.cv1(inputs)
|
||||
y1 = self.m(x)
|
||||
y2 = self.m(y1)
|
||||
return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))
|
||||
|
||||
|
||||
class TFDetect(keras.layers.Layer):
|
||||
# TF YOLOv5 Detect layer
|
||||
def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer
|
||||
super().__init__()
|
||||
self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
|
||||
self.nc = nc # number of classes
|
||||
self.no = nc + 5 # number of outputs per anchor
|
||||
self.nl = len(anchors) # number of detection layers
|
||||
self.na = len(anchors[0]) // 2 # number of anchors
|
||||
self.grid = [tf.zeros(1)] * self.nl # init grid
|
||||
self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
|
||||
self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])
|
||||
self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
|
||||
self.training = False # set to False after building model
|
||||
self.imgsz = imgsz
|
||||
for i in range(self.nl):
|
||||
ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
|
||||
self.grid[i] = self._make_grid(nx, ny)
|
||||
|
||||
def call(self, inputs):
|
||||
z = [] # inference output
|
||||
x = []
|
||||
for i in range(self.nl):
|
||||
x.append(self.m[i](inputs[i]))
|
||||
# x(bs,20,20,255) to x(bs,3,20,20,85)
|
||||
ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
|
||||
x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])
|
||||
|
||||
if not self.training: # inference
|
||||
y = x[i]
|
||||
grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5
|
||||
anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4
|
||||
xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i] # xy
|
||||
wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid
|
||||
# Normalize xywh to 0-1 to reduce calibration error
|
||||
xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
|
||||
wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
|
||||
y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
|
||||
z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))
|
||||
|
||||
return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),)
|
||||
|
||||
@staticmethod
|
||||
def _make_grid(nx=20, ny=20):
|
||||
# yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
|
||||
# return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
|
||||
xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
|
||||
return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
|
||||
|
||||
|
||||
class TFSegment(TFDetect):
|
||||
# YOLOv5 Segment head for segmentation models
|
||||
def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
|
||||
super().__init__(nc, anchors, ch, imgsz, w)
|
||||
self.nm = nm # number of masks
|
||||
self.npr = npr # number of protos
|
||||
self.no = 5 + nc + self.nm # number of outputs per anchor
|
||||
self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv
|
||||
self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos
|
||||
self.detect = TFDetect.call
|
||||
|
||||
def call(self, x):
|
||||
p = self.proto(x[0])
|
||||
# p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos
|
||||
p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160)
|
||||
x = self.detect(self, x)
|
||||
return (x, p) if self.training else (x[0], p)
|
||||
|
||||
|
||||
class TFProto(keras.layers.Layer):
|
||||
|
||||
def __init__(self, c1, c_=256, c2=32, w=None):
|
||||
super().__init__()
|
||||
self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
|
||||
self.upsample = TFUpsample(None, scale_factor=2, mode='nearest')
|
||||
self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
|
||||
self.cv3 = TFConv(c_, c2, w=w.cv3)
|
||||
|
||||
def call(self, inputs):
|
||||
return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))
|
||||
|
||||
|
||||
class TFUpsample(keras.layers.Layer):
|
||||
# TF version of torch.nn.Upsample()
|
||||
def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
|
||||
super().__init__()
|
||||
assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2'
|
||||
self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode)
|
||||
# self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
|
||||
# with default arguments: align_corners=False, half_pixel_centers=False
|
||||
# self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
|
||||
# size=(x.shape[1] * 2, x.shape[2] * 2))
|
||||
|
||||
def call(self, inputs):
|
||||
return self.upsample(inputs)
|
||||
|
||||
|
||||
class TFConcat(keras.layers.Layer):
|
||||
# TF version of torch.concat()
|
||||
def __init__(self, dimension=1, w=None):
|
||||
super().__init__()
|
||||
assert dimension == 1, 'convert only NCHW to NHWC concat'
|
||||
self.d = 3
|
||||
|
||||
def call(self, inputs):
|
||||
return tf.concat(inputs, self.d)
|
||||
|
||||
|
||||
def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
|
||||
LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
|
||||
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
|
||||
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
|
||||
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
|
||||
|
||||
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
|
||||
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
|
||||
m_str = m
|
||||
m = eval(m) if isinstance(m, str) else m # eval strings
|
||||
for j, a in enumerate(args):
|
||||
try:
|
||||
args[j] = eval(a) if isinstance(a, str) else a # eval strings
|
||||
except NameError:
|
||||
pass
|
||||
|
||||
n = max(round(n * gd), 1) if n > 1 else n # depth gain
|
||||
if m in [
|
||||
nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv,
|
||||
BottleneckCSP, C3, C3x]:
|
||||
c1, c2 = ch[f], args[0]
|
||||
c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
|
||||
|
||||
args = [c1, c2, *args[1:]]
|
||||
if m in [BottleneckCSP, C3, C3x]:
|
||||
args.insert(2, n)
|
||||
n = 1
|
||||
elif m is nn.BatchNorm2d:
|
||||
args = [ch[f]]
|
||||
elif m is Concat:
|
||||
c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
|
||||
elif m in [Detect, Segment]:
|
||||
args.append([ch[x + 1] for x in f])
|
||||
if isinstance(args[1], int): # number of anchors
|
||||
args[1] = [list(range(args[1] * 2))] * len(f)
|
||||
if m is Segment:
|
||||
args[3] = make_divisible(args[3] * gw, 8)
|
||||
args.append(imgsz)
|
||||
else:
|
||||
c2 = ch[f]
|
||||
|
||||
tf_m = eval('TF' + m_str.replace('nn.', ''))
|
||||
m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
|
||||
else tf_m(*args, w=model.model[i]) # module
|
||||
|
||||
torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
|
||||
t = str(m)[8:-2].replace('__main__.', '') # module type
|
||||
np = sum(x.numel() for x in torch_m_.parameters()) # number params
|
||||
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
|
||||
LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print
|
||||
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
|
||||
layers.append(m_)
|
||||
ch.append(c2)
|
||||
return keras.Sequential(layers), sorted(save)
|
||||
|
||||
|
||||
class TFModel:
|
||||
# TF YOLOv5 model
|
||||
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes
|
||||
super().__init__()
|
||||
if isinstance(cfg, dict):
|
||||
self.yaml = cfg # model dict
|
||||
else: # is *.yaml
|
||||
import yaml # for torch hub
|
||||
self.yaml_file = Path(cfg).name
|
||||
with open(cfg) as f:
|
||||
self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
|
||||
|
||||
# Define model
|
||||
if nc and nc != self.yaml['nc']:
|
||||
LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
|
||||
self.yaml['nc'] = nc # override yaml value
|
||||
self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
|
||||
|
||||
def predict(self,
|
||||
inputs,
|
||||
tf_nms=False,
|
||||
agnostic_nms=False,
|
||||
topk_per_class=100,
|
||||
topk_all=100,
|
||||
iou_thres=0.45,
|
||||
conf_thres=0.25):
|
||||
y = [] # outputs
|
||||
x = inputs
|
||||
for m in self.model.layers:
|
||||
if m.f != -1: # if not from previous layer
|
||||
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
|
||||
|
||||
x = m(x) # run
|
||||
y.append(x if m.i in self.savelist else None) # save output
|
||||
|
||||
# Add TensorFlow NMS
|
||||
if tf_nms:
|
||||
boxes = self._xywh2xyxy(x[0][..., :4])
|
||||
probs = x[0][:, :, 4:5]
|
||||
classes = x[0][:, :, 5:]
|
||||
scores = probs * classes
|
||||
if agnostic_nms:
|
||||
nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)
|
||||
else:
|
||||
boxes = tf.expand_dims(boxes, 2)
|
||||
nms = tf.image.combined_non_max_suppression(boxes,
|
||||
scores,
|
||||
topk_per_class,
|
||||
topk_all,
|
||||
iou_thres,
|
||||
conf_thres,
|
||||
clip_boxes=False)
|
||||
return (nms,)
|
||||
return x # output [1,6300,85] = [xywh, conf, class0, class1, ...]
|
||||
# x = x[0] # [x(1,6300,85), ...] to x(6300,85)
|
||||
# xywh = x[..., :4] # x(6300,4) boxes
|
||||
# conf = x[..., 4:5] # x(6300,1) confidences
|
||||
# cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes
|
||||
# return tf.concat([conf, cls, xywh], 1)
|
||||
|
||||
@staticmethod
|
||||
def _xywh2xyxy(xywh):
|
||||
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
|
||||
x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
|
||||
return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
|
||||
|
||||
|
||||
class AgnosticNMS(keras.layers.Layer):
|
||||
# TF Agnostic NMS
|
||||
def call(self, input, topk_all, iou_thres, conf_thres):
|
||||
# wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
|
||||
return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres),
|
||||
input,
|
||||
fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
|
||||
name='agnostic_nms')
|
||||
|
||||
@staticmethod
|
||||
def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS
|
||||
boxes, classes, scores = x
|
||||
class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
|
||||
scores_inp = tf.reduce_max(scores, -1)
|
||||
selected_inds = tf.image.non_max_suppression(boxes,
|
||||
scores_inp,
|
||||
max_output_size=topk_all,
|
||||
iou_threshold=iou_thres,
|
||||
score_threshold=conf_thres)
|
||||
selected_boxes = tf.gather(boxes, selected_inds)
|
||||
padded_boxes = tf.pad(selected_boxes,
|
||||
paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
|
||||
mode='CONSTANT',
|
||||
constant_values=0.0)
|
||||
selected_scores = tf.gather(scores_inp, selected_inds)
|
||||
padded_scores = tf.pad(selected_scores,
|
||||
paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
|
||||
mode='CONSTANT',
|
||||
constant_values=-1.0)
|
||||
selected_classes = tf.gather(class_inds, selected_inds)
|
||||
padded_classes = tf.pad(selected_classes,
|
||||
paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
|
||||
mode='CONSTANT',
|
||||
constant_values=-1.0)
|
||||
valid_detections = tf.shape(selected_inds)[0]
|
||||
return padded_boxes, padded_scores, padded_classes, valid_detections
|
||||
|
||||
|
||||
def activations(act=nn.SiLU):
|
||||
# Returns TF activation from input PyTorch activation
|
||||
if isinstance(act, nn.LeakyReLU):
|
||||
return lambda x: keras.activations.relu(x, alpha=0.1)
|
||||
elif isinstance(act, nn.Hardswish):
|
||||
return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667
|
||||
elif isinstance(act, (nn.SiLU, SiLU)):
|
||||
return lambda x: keras.activations.swish(x)
|
||||
else:
|
||||
raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}')
|
||||
|
||||
|
||||
def representative_dataset_gen(dataset, ncalib=100):
|
||||
# Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
|
||||
for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
|
||||
im = np.transpose(img, [1, 2, 0])
|
||||
im = np.expand_dims(im, axis=0).astype(np.float32)
|
||||
im /= 255
|
||||
yield [im]
|
||||
if n >= ncalib:
|
||||
break
|
||||
|
||||
|
||||
def run(
|
||||
weights=ROOT / 'yolov5s.pt', # weights path
|
||||
imgsz=(640, 640), # inference size h,w
|
||||
batch_size=1, # batch size
|
||||
dynamic=False, # dynamic batch size
|
||||
):
|
||||
# PyTorch model
|
||||
im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image
|
||||
model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)
|
||||
_ = model(im) # inference
|
||||
model.info()
|
||||
|
||||
# TensorFlow model
|
||||
im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image
|
||||
tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
|
||||
_ = tf_model.predict(im) # inference
|
||||
|
||||
# Keras model
|
||||
im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
|
||||
keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
|
||||
keras_model.summary()
|
||||
|
||||
LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.')
|
||||
|
||||
|
||||
def parse_opt():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
|
||||
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
|
||||
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
|
||||
parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
|
||||
opt = parser.parse_args()
|
||||
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
|
||||
print_args(vars(opt))
|
||||
return opt
|
||||
|
||||
|
||||
def main(opt):
|
||||
run(**vars(opt))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
opt = parse_opt()
|
||||
main(opt)
|
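The representative_dataset_gen helper above is the calibration-image generator that tf.py expects to be attached to converter.representative_dataset when exporting an int8 TFLite model. A rough sketch of the wiring is shown below; the weights file, the image folder, and the LoadImages loader are assumptions (any iterable yielding (path, img, im0s, vid_cap, s) tuples with CHW uint8 images works), since none of them appear in this part of the diff.

# Sketch only, not part of the diff; 'yolov5s.pt', 'JPEGImages' and LoadImages are assumptions.
import tensorflow as tf
import torch
from tensorflow import keras
from models.experimental import attempt_load
from models.tf import TFModel, representative_dataset_gen
from utils.dataloaders import LoadImages  # assumed YOLOv5-style loader

# Build the Keras model the same way run() does above
pt_model = attempt_load('yolov5s.pt', device=torch.device('cpu'), inplace=True, fuse=False)
tf_model = TFModel(cfg=pt_model.yaml, model=pt_model, nc=pt_model.nc, imgsz=(640, 640))
inputs = keras.Input(shape=(640, 640, 3), batch_size=1)
keras_model = keras.Model(inputs=inputs, outputs=tf_model.predict(inputs))

# Feed calibration frames through representative_dataset_gen during conversion
dataset = LoadImages('JPEGImages', img_size=640, auto=False)
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
tflite_model = converter.convert()
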
ayf_ocr/yolo_databases/api/models/yolo.py (new file)
@@ -0,0 +1,391 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
YOLO-specific modules
|
||||
|
||||
Usage:
|
||||
$ python models/yolo.py --cfg yolov5s.yaml
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import contextlib
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
from copy import deepcopy
|
||||
from pathlib import Path
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
if platform.system() != 'Windows':
|
||||
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
||||
|
||||
from models.common import *
|
||||
from models.experimental import *
|
||||
from utils.autoanchor import check_anchor_order
|
||||
from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
|
||||
from utils.plots import feature_visualization
|
||||
from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
|
||||
time_sync)
|
||||
|
||||
try:
|
||||
import thop # for FLOPs computation
|
||||
except ImportError:
|
||||
thop = None
|
||||
|
||||
|
||||
class Detect(nn.Module):
|
||||
# YOLOv5 Detect head for detection models
|
||||
stride = None # strides computed during build
|
||||
dynamic = False # force grid reconstruction
|
||||
export = False # export mode
|
||||
|
||||
def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
|
||||
super().__init__()
|
||||
self.nc = nc # number of classes
|
||||
self.no = nc + 5 # number of outputs per anchor
|
||||
self.nl = len(anchors) # number of detection layers
|
||||
self.na = len(anchors[0]) // 2 # number of anchors
|
||||
self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid
|
||||
self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid
|
||||
self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2)
|
||||
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
|
||||
self.inplace = inplace # use inplace ops (e.g. slice assignment)
|
||||
|
||||
def forward(self, x):
|
||||
z = [] # inference output
|
||||
for i in range(self.nl):
|
||||
x[i] = self.m[i](x[i]) # conv
|
||||
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
|
||||
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
|
||||
|
||||
if not self.training: # inference
|
||||
if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
|
||||
self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
|
||||
|
||||
if isinstance(self, Segment): # (boxes + masks)
|
||||
xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
|
||||
xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy
|
||||
wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh
|
||||
y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
|
||||
else: # Detect (boxes only)
|
||||
xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
|
||||
xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy
|
||||
wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh
|
||||
y = torch.cat((xy, wh, conf), 4)
|
||||
z.append(y.view(bs, self.na * nx * ny, self.no))
|
||||
|
||||
return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
|
||||
|
||||
def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
|
||||
d = self.anchors[i].device
|
||||
t = self.anchors[i].dtype
|
||||
shape = 1, self.na, ny, nx, 2 # grid shape
|
||||
y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
|
||||
yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility
|
||||
grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5
|
||||
anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
|
||||
return grid, anchor_grid
|
||||
|
||||
|
||||
class Segment(Detect):
|
||||
# YOLOv5 Segment head for segmentation models
|
||||
def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
|
||||
super().__init__(nc, anchors, ch, inplace)
|
||||
self.nm = nm # number of masks
|
||||
self.npr = npr # number of protos
|
||||
self.no = 5 + nc + self.nm # number of outputs per anchor
|
||||
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
|
||||
self.proto = Proto(ch[0], self.npr, self.nm) # protos
|
||||
self.detect = Detect.forward
|
||||
|
||||
def forward(self, x):
|
||||
p = self.proto(x[0])
|
||||
x = self.detect(self, x)
|
||||
return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
|
||||
|
||||
|
||||
class BaseModel(nn.Module):
|
||||
# YOLOv5 base model
|
||||
def forward(self, x, profile=False, visualize=False):
|
||||
return self._forward_once(x, profile, visualize) # single-scale inference, train
|
||||
|
||||
def _forward_once(self, x, profile=False, visualize=False):
|
||||
y, dt = [], [] # outputs
|
||||
for m in self.model:
|
||||
if m.f != -1: # if not from previous layer
|
||||
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
|
||||
if profile:
|
||||
self._profile_one_layer(m, x, dt)
|
||||
x = m(x) # run
|
||||
y.append(x if m.i in self.save else None) # save output
|
||||
if visualize:
|
||||
feature_visualization(x, m.type, m.i, save_dir=visualize)
|
||||
return x
|
||||
|
||||
def _profile_one_layer(self, m, x, dt):
|
||||
c = m == self.model[-1] # is final layer, copy input as inplace fix
|
||||
o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
|
||||
t = time_sync()
|
||||
for _ in range(10):
|
||||
m(x.copy() if c else x)
|
||||
dt.append((time_sync() - t) * 100)
|
||||
if m == self.model[0]:
|
||||
LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
|
||||
LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
|
||||
if c:
|
||||
LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
|
||||
|
||||
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
|
||||
LOGGER.info('Fusing layers... ')
|
||||
for m in self.model.modules():
|
||||
if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
|
||||
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
|
||||
delattr(m, 'bn') # remove batchnorm
|
||||
m.forward = m.forward_fuse # update forward
|
||||
self.info()
|
||||
return self
|
||||
|
||||
def info(self, verbose=False, img_size=640): # print model information
|
||||
model_info(self, verbose, img_size)
|
||||
|
||||
def _apply(self, fn):
|
||||
# Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
|
||||
self = super()._apply(fn)
|
||||
m = self.model[-1] # Detect()
|
||||
if isinstance(m, (Detect, Segment)):
|
||||
m.stride = fn(m.stride)
|
||||
m.grid = list(map(fn, m.grid))
|
||||
if isinstance(m.anchor_grid, list):
|
||||
m.anchor_grid = list(map(fn, m.anchor_grid))
|
||||
return self
|
||||
|
||||
|
||||
class DetectionModel(BaseModel):
|
||||
# YOLOv5 detection model
|
||||
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
|
||||
super().__init__()
|
||||
if isinstance(cfg, dict):
|
||||
self.yaml = cfg # model dict
|
||||
else: # is *.yaml
|
||||
import yaml # for torch hub
|
||||
self.yaml_file = Path(cfg).name
|
||||
with open(cfg, encoding='ascii', errors='ignore') as f:
|
||||
self.yaml = yaml.safe_load(f) # model dict
|
||||
|
||||
# Define model
|
||||
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
|
||||
if nc and nc != self.yaml['nc']:
|
||||
LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
|
||||
self.yaml['nc'] = nc # override yaml value
|
||||
if anchors:
|
||||
LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
|
||||
self.yaml['anchors'] = round(anchors) # override yaml value
|
||||
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
|
||||
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
|
||||
self.inplace = self.yaml.get('inplace', True)
|
||||
|
||||
# Build strides, anchors
|
||||
m = self.model[-1] # Detect()
|
||||
if isinstance(m, (Detect, Segment)):
|
||||
s = 256 # 2x min stride
|
||||
m.inplace = self.inplace
|
||||
forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
|
||||
m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward
|
||||
check_anchor_order(m)
|
||||
m.anchors /= m.stride.view(-1, 1, 1)
|
||||
self.stride = m.stride
|
||||
self._initialize_biases() # only run once
|
||||
|
||||
# Init weights, biases
|
||||
initialize_weights(self)
|
||||
self.info()
|
||||
LOGGER.info('')
|
||||
|
||||
def forward(self, x, augment=False, profile=False, visualize=False):
|
||||
if augment:
|
||||
return self._forward_augment(x) # augmented inference, None
|
||||
return self._forward_once(x, profile, visualize) # single-scale inference, train
|
||||
|
||||
def _forward_augment(self, x):
|
||||
img_size = x.shape[-2:] # height, width
|
||||
s = [1, 0.83, 0.67] # scales
|
||||
f = [None, 3, None] # flips (2-ud, 3-lr)
|
||||
y = [] # outputs
|
||||
for si, fi in zip(s, f):
|
||||
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
|
||||
yi = self._forward_once(xi)[0] # forward
|
||||
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
|
||||
yi = self._descale_pred(yi, fi, si, img_size)
|
||||
y.append(yi)
|
||||
y = self._clip_augmented(y) # clip augmented tails
|
||||
return torch.cat(y, 1), None # augmented inference, train
|
||||
|
||||
def _descale_pred(self, p, flips, scale, img_size):
|
||||
# de-scale predictions following augmented inference (inverse operation)
|
||||
if self.inplace:
|
||||
p[..., :4] /= scale # de-scale
|
||||
if flips == 2:
|
||||
p[..., 1] = img_size[0] - p[..., 1] # de-flip ud
|
||||
elif flips == 3:
|
||||
p[..., 0] = img_size[1] - p[..., 0] # de-flip lr
|
||||
else:
|
||||
x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale
|
||||
if flips == 2:
|
||||
y = img_size[0] - y # de-flip ud
|
||||
elif flips == 3:
|
||||
x = img_size[1] - x # de-flip lr
|
||||
p = torch.cat((x, y, wh, p[..., 4:]), -1)
|
||||
return p
|
||||
|
||||
def _clip_augmented(self, y):
|
||||
# Clip YOLOv5 augmented inference tails
|
||||
nl = self.model[-1].nl # number of detection layers (P3-P5)
|
||||
g = sum(4 ** x for x in range(nl)) # grid points
|
||||
e = 1 # exclude layer count
|
||||
i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices
|
||||
y[0] = y[0][:, :-i] # large
|
||||
i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices
|
||||
y[-1] = y[-1][:, i:] # small
|
||||
return y
|
||||
|
||||
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
|
||||
# https://arxiv.org/abs/1708.02002 section 3.3
|
||||
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
|
||||
m = self.model[-1] # Detect() module
|
||||
for mi, s in zip(m.m, m.stride): # from
|
||||
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
|
||||
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
|
||||
b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls
|
||||
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
|
||||
|
||||
|
||||
Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility
|
||||
|
||||
|
||||
class SegmentationModel(DetectionModel):
|
||||
# YOLOv5 segmentation model
|
||||
def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
|
||||
super().__init__(cfg, ch, nc, anchors)
|
||||
|
||||
|
||||
class ClassificationModel(BaseModel):
|
||||
# YOLOv5 classification model
|
||||
def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index
|
||||
super().__init__()
|
||||
self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
|
||||
|
||||
def _from_detection_model(self, model, nc=1000, cutoff=10):
|
||||
# Create a YOLOv5 classification model from a YOLOv5 detection model
|
||||
if isinstance(model, DetectMultiBackend):
|
||||
model = model.model # unwrap DetectMultiBackend
|
||||
model.model = model.model[:cutoff] # backbone
|
||||
m = model.model[-1] # last layer
|
||||
ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module
|
||||
c = Classify(ch, nc) # Classify()
|
||||
c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type
|
||||
model.model[-1] = c # replace
|
||||
self.model = model.model
|
||||
self.stride = model.stride
|
||||
self.save = []
|
||||
self.nc = nc
|
||||
|
||||
def _from_yaml(self, cfg):
|
||||
# Create a YOLOv5 classification model from a *.yaml file
|
||||
self.model = None
|
||||
|
||||
|
||||
def parse_model(d, ch): # model_dict, input_channels(3)
|
||||
# Parse a YOLOv5 model.yaml dictionary
|
||||
LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
|
||||
anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
|
||||
if act:
|
||||
Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU()
|
||||
LOGGER.info(f"{colorstr('activation:')} {act}") # print
|
||||
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
|
||||
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
|
||||
|
||||
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
|
||||
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
|
||||
m = eval(m) if isinstance(m, str) else m # eval strings
|
||||
for j, a in enumerate(args):
|
||||
with contextlib.suppress(NameError):
|
||||
args[j] = eval(a) if isinstance(a, str) else a # eval strings
|
||||
|
||||
n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain
|
||||
if m in {
|
||||
Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
|
||||
BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
|
||||
c1, c2 = ch[f], args[0]
|
||||
if c2 != no: # if not output
|
||||
c2 = make_divisible(c2 * gw, 8)
|
||||
|
||||
args = [c1, c2, *args[1:]]
|
||||
if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
|
||||
args.insert(2, n) # number of repeats
|
||||
n = 1
|
||||
elif m is nn.BatchNorm2d:
|
||||
args = [ch[f]]
|
||||
elif m is Concat:
|
||||
c2 = sum(ch[x] for x in f)
|
||||
# TODO: channel, gw, gd
|
||||
elif m in {Detect, Segment}:
|
||||
args.append([ch[x] for x in f])
|
||||
if isinstance(args[1], int): # number of anchors
|
||||
args[1] = [list(range(args[1] * 2))] * len(f)
|
||||
if m is Segment:
|
||||
args[3] = make_divisible(args[3] * gw, 8)
|
||||
elif m is Contract:
|
||||
c2 = ch[f] * args[0] ** 2
|
||||
elif m is Expand:
|
||||
c2 = ch[f] // args[0] ** 2
|
||||
else:
|
||||
c2 = ch[f]
|
||||
|
||||
m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
|
||||
t = str(m)[8:-2].replace('__main__.', '') # module type
|
||||
np = sum(x.numel() for x in m_.parameters()) # number params
|
||||
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
|
||||
LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
|
||||
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
|
||||
layers.append(m_)
|
||||
if i == 0:
|
||||
ch = []
|
||||
ch.append(c2)
|
||||
return nn.Sequential(*layers), sorted(save)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
|
||||
parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
|
||||
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
||||
parser.add_argument('--profile', action='store_true', help='profile model speed')
|
||||
parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
|
||||
parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
|
||||
opt = parser.parse_args()
|
||||
opt.cfg = check_yaml(opt.cfg) # check YAML
|
||||
print_args(vars(opt))
|
||||
device = select_device(opt.device)
|
||||
|
||||
# Create model
|
||||
im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
|
||||
model = Model(opt.cfg).to(device)
|
||||
|
||||
# Options
|
||||
if opt.line_profile: # profile layer by layer
|
||||
model(im, profile=True)
|
||||
|
||||
elif opt.profile: # profile forward-backward
|
||||
results = profile(input=im, ops=[model], n=3)
|
||||
|
||||
elif opt.test: # test all models
|
||||
for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
|
||||
try:
|
||||
_ = Model(cfg)
|
||||
except Exception as e:
|
||||
print(f'Error in {cfg}: {e}')
|
||||
|
||||
else: # report fused model summary
|
||||
model.fuse()
|
ayf_ocr/yolo_databases/api/models/yolov5l.yaml (new file)
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]

ayf_ocr/yolo_databases/api/models/yolov5m.yaml (new file)
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.67  # model depth multiple
width_multiple: 0.75  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]

ayf_ocr/yolo_databases/api/models/yolov5n.yaml (new file)
@@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.25  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]

48
ayf_ocr/yolo_databases/api/models/yolov5s.yaml
Normal file
@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 1  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
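Note that this project's yolov5s.yaml sets nc: 1 (a single detection class), while yolov5n.yaml and yolov5x.yaml keep the default 80 COCO classes; whichever config is passed to training must match the classes list used when the label files were generated. A quick sanity check is to load the config and print its key fields; the path below is an assumption based on the repo layout above:

import yaml

# Assumed path; adjust to where the config actually lives.
with open('ayf_ocr/yolo_databases/api/models/yolov5s.yaml', encoding='utf-8') as f:
    cfg = yaml.safe_load(f)

print('classes:', cfg['nc'])
print('depth/width multiples:', cfg['depth_multiple'], cfg['width_multiple'])
print(len(cfg['backbone']), 'backbone rows,', len(cfg['head']), 'head rows')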
ayf_ocr/yolo_databases/api/models/yolov5x.yaml (new file, 48 lines)
@ -0,0 +1,48 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 1.33  # model depth multiple
width_multiple: 1.25  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
ayf_ocr/yolo_databases/api/segment/predict.py (new file, 284 lines)
@ -0,0 +1,284 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.

Usage - sources:
    $ python segment/predict.py --weights yolov5s-seg.pt --source 0                               # webcam
                                                                   img.jpg                         # image
                                                                   vid.mp4                         # video
                                                                   screen                          # screenshot
                                                                   path/                           # directory
                                                                   list.txt                        # list of images
                                                                   list.streams                    # list of streams
                                                                   'path/*.jpg'                    # glob
                                                                   'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                                   'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python segment/predict.py --weights yolov5s-seg.pt                 # PyTorch
                                          yolov5s-seg.torchscript        # TorchScript
                                          yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                          yolov5s-seg_openvino_model     # OpenVINO
                                          yolov5s-seg.engine             # TensorRT
                                          yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                          yolov5s-seg_saved_model        # TensorFlow SavedModel
                                          yolov5s-seg.pb                 # TensorFlow GraphDef
                                          yolov5s-seg.tflite             # TensorFlow Lite
                                          yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                          yolov5s-seg_paddle_model       # PaddlePaddle
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
||||
|
||||
from models.common import DetectMultiBackend
|
||||
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
|
||||
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
|
||||
increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
|
||||
strip_optimizer)
|
||||
from utils.plots import Annotator, colors, save_one_box
|
||||
from utils.segment.general import masks2segments, process_mask, process_mask_native
|
||||
from utils.torch_utils import select_device, smart_inference_mode
|
||||
|
||||
|
||||
@smart_inference_mode()
|
||||
def run(
|
||||
weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s)
|
||||
source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam)
|
||||
data=ROOT / 'data/coco128.yaml', # dataset.yaml path
|
||||
imgsz=(640, 640), # inference size (height, width)
|
||||
conf_thres=0.25, # confidence threshold
|
||||
iou_thres=0.45, # NMS IOU threshold
|
||||
max_det=1000, # maximum detections per image
|
||||
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
||||
view_img=False, # show results
|
||||
save_txt=False, # save results to *.txt
|
||||
save_conf=False, # save confidences in --save-txt labels
|
||||
save_crop=False, # save cropped prediction boxes
|
||||
nosave=False, # do not save images/videos
|
||||
classes=None, # filter by class: --class 0, or --class 0 2 3
|
||||
agnostic_nms=False, # class-agnostic NMS
|
||||
augment=False, # augmented inference
|
||||
visualize=False, # visualize features
|
||||
update=False, # update all models
|
||||
project=ROOT / 'runs/predict-seg', # save results to project/name
|
||||
name='exp', # save results to project/name
|
||||
exist_ok=False, # existing project/name ok, do not increment
|
||||
line_thickness=3, # bounding box thickness (pixels)
|
||||
hide_labels=False, # hide labels
|
||||
hide_conf=False, # hide confidences
|
||||
half=False, # use FP16 half-precision inference
|
||||
dnn=False, # use OpenCV DNN for ONNX inference
|
||||
vid_stride=1, # video frame-rate stride
|
||||
retina_masks=False,
|
||||
):
|
||||
source = str(source)
|
||||
save_img = not nosave and not source.endswith('.txt') # save inference images
|
||||
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
|
||||
is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
|
||||
webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
|
||||
screenshot = source.lower().startswith('screen')
|
||||
if is_url and is_file:
|
||||
source = check_file(source) # download
|
||||
|
||||
# Directories
|
||||
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
|
||||
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
|
||||
|
||||
# Load model
|
||||
device = select_device(device)
|
||||
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
|
||||
stride, names, pt = model.stride, model.names, model.pt
|
||||
imgsz = check_img_size(imgsz, s=stride) # check image size
|
||||
|
||||
# Dataloader
|
||||
bs = 1 # batch_size
|
||||
if webcam:
|
||||
view_img = check_imshow(warn=True)
|
||||
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
|
||||
bs = len(dataset)
|
||||
elif screenshot:
|
||||
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
|
||||
else:
|
||||
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
|
||||
vid_path, vid_writer = [None] * bs, [None] * bs
|
||||
|
||||
# Run inference
|
||||
model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
|
||||
seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
|
||||
for path, im, im0s, vid_cap, s in dataset:
|
||||
with dt[0]:
|
||||
im = torch.from_numpy(im).to(model.device)
|
||||
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
|
||||
im /= 255 # 0 - 255 to 0.0 - 1.0
|
||||
if len(im.shape) == 3:
|
||||
im = im[None] # expand for batch dim
|
||||
|
||||
# Inference
|
||||
with dt[1]:
|
||||
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
|
||||
pred, proto = model(im, augment=augment, visualize=visualize)[:2]
|
||||
|
||||
# NMS
|
||||
with dt[2]:
|
||||
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32)
|
||||
|
||||
# Second-stage classifier (optional)
|
||||
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
|
||||
|
||||
# Process predictions
|
||||
for i, det in enumerate(pred): # per image
|
||||
seen += 1
|
||||
if webcam: # batch_size >= 1
|
||||
p, im0, frame = path[i], im0s[i].copy(), dataset.count
|
||||
s += f'{i}: '
|
||||
else:
|
||||
p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
|
||||
|
||||
p = Path(p) # to Path
|
||||
save_path = str(save_dir / p.name) # im.jpg
|
||||
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
|
||||
s += '%gx%g ' % im.shape[2:] # print string
|
||||
imc = im0.copy() if save_crop else im0 # for save_crop
|
||||
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
|
||||
if len(det):
|
||||
if retina_masks:
|
||||
# scale bboxes first, then crop the masks
|
||||
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size
|
||||
masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC
|
||||
else:
|
||||
masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC
|
||||
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size
|
||||
|
||||
# Segments
|
||||
if save_txt:
|
||||
segments = [
|
||||
scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True)
|
||||
for x in reversed(masks2segments(masks))]
|
||||
|
||||
# Print results
|
||||
for c in det[:, 5].unique():
|
||||
n = (det[:, 5] == c).sum() # detections per class
|
||||
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
|
||||
|
||||
# Mask plotting
|
||||
annotator.masks(
|
||||
masks,
|
||||
colors=[colors(x, True) for x in det[:, 5]],
|
||||
im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() /
|
||||
255 if retina_masks else im[i])
|
||||
|
||||
# Write results
|
||||
for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])):
|
||||
if save_txt: # Write to file
|
||||
seg = segments[j].reshape(-1) # (n,2) to (n*2)
|
||||
line = (cls, *seg, conf) if save_conf else (cls, *seg) # label format
|
||||
with open(f'{txt_path}.txt', 'a') as f:
|
||||
f.write(('%g ' * len(line)).rstrip() % line + '\n')
|
||||
|
||||
if save_img or save_crop or view_img: # Add bbox to image
|
||||
c = int(cls) # integer class
|
||||
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
|
||||
annotator.box_label(xyxy, label, color=colors(c, True))
|
||||
# annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
|
||||
if save_crop:
|
||||
save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
|
||||
|
||||
# Stream results
|
||||
im0 = annotator.result()
|
||||
if view_img:
|
||||
if platform.system() == 'Linux' and p not in windows:
|
||||
windows.append(p)
|
||||
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
|
||||
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
|
||||
cv2.imshow(str(p), im0)
|
||||
if cv2.waitKey(1) == ord('q'): # 1 millisecond
|
||||
exit()
|
||||
|
||||
# Save results (image with detections)
|
||||
if save_img:
|
||||
if dataset.mode == 'image':
|
||||
cv2.imwrite(save_path, im0)
|
||||
else: # 'video' or 'stream'
|
||||
if vid_path[i] != save_path: # new video
|
||||
vid_path[i] = save_path
|
||||
if isinstance(vid_writer[i], cv2.VideoWriter):
|
||||
vid_writer[i].release() # release previous video writer
|
||||
if vid_cap: # video
|
||||
fps = vid_cap.get(cv2.CAP_PROP_FPS)
|
||||
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||||
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||||
else: # stream
|
||||
fps, w, h = 30, im0.shape[1], im0.shape[0]
|
||||
save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
|
||||
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
|
||||
vid_writer[i].write(im0)
|
||||
|
||||
# Print time (inference-only)
|
||||
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
|
||||
|
||||
# Print results
|
||||
t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
|
||||
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
|
||||
if save_txt or save_img:
|
||||
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
|
||||
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
|
||||
if update:
|
||||
strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
|
||||
|
||||
|
||||
def parse_opt():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
|
||||
parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
|
||||
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
|
||||
parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
|
||||
parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
|
||||
parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
|
||||
parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
|
||||
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
||||
parser.add_argument('--view-img', action='store_true', help='show results')
|
||||
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
|
||||
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
|
||||
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
|
||||
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
|
||||
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
|
||||
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
|
||||
parser.add_argument('--augment', action='store_true', help='augmented inference')
|
||||
parser.add_argument('--visualize', action='store_true', help='visualize features')
|
||||
parser.add_argument('--update', action='store_true', help='update all models')
|
||||
parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name')
|
||||
parser.add_argument('--name', default='exp', help='save results to project/name')
|
||||
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
||||
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
|
||||
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
|
||||
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
|
||||
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
|
||||
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
|
||||
parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
|
||||
parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution')
|
||||
opt = parser.parse_args()
|
||||
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
|
||||
print_args(vars(opt))
|
||||
return opt
|
||||
|
||||
|
||||
def main(opt):
|
||||
check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
|
||||
run(**vars(opt))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
opt = parse_opt()
|
||||
main(opt)
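Besides the CLI entry point, segment/predict.py exposes run() with keyword arguments mirroring parse_opt(), so an API layer can call it directly. A minimal sketch, assuming the YOLOv5 root (the api/ directory here) is on sys.path and that a trained checkpoint exists at the path shown; both are assumptions, adjust to your layout:

# Sketch only: programmatic segmentation inference via run(); paths are assumptions.
from segment.predict import run

run(
    weights='runs/train-seg/exp/weights/best.pt',  # assumed checkpoint path
    source='JPEGImages',                           # assumed directory of images to segment
    imgsz=(640, 640),
    conf_thres=0.25,
    save_txt=True,                                 # also write per-image label files
)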
ayf_ocr/yolo_databases/api/segment/train.py (new file, 666 lines)
@ -0,0 +1,666 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Train a YOLOv5 segmentation model on a segmentation dataset.
Models and datasets download automatically from the latest YOLOv5 release.

Usage - Single-GPU training:
    $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640  # from pretrained (recommended)
    $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640  # from scratch

Usage - Multi-GPU DDP training:
    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3

Models:     https://github.com/ultralytics/yolov5/tree/master/models
Datasets:   https://github.com/ultralytics/yolov5/tree/master/data
Tutorial:   https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
"""
|
||||
|
||||
import argparse
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from copy import deepcopy
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
import torch.nn as nn
|
||||
import yaml
|
||||
from torch.optim import lr_scheduler
|
||||
from tqdm import tqdm
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
||||
|
||||
import segment.val as validate # for end-of-epoch mAP
|
||||
from models.experimental import attempt_load
|
||||
from models.yolo import SegmentationModel
|
||||
from utils.autoanchor import check_anchors
|
||||
from utils.autobatch import check_train_batch_size
|
||||
from utils.callbacks import Callbacks
|
||||
from utils.downloads import attempt_download, is_url
|
||||
from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
|
||||
check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
|
||||
get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
|
||||
labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save)
|
||||
from utils.loggers import GenericLogger
|
||||
from utils.plots import plot_evolve, plot_labels
|
||||
from utils.segment.dataloaders import create_dataloader
|
||||
from utils.segment.loss import ComputeLoss
|
||||
from utils.segment.metrics import KEYS, fitness
|
||||
from utils.segment.plots import plot_images_and_masks, plot_results_with_masks
|
||||
from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
|
||||
smart_resume, torch_distributed_zero_first)
|
||||
|
||||
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
|
||||
RANK = int(os.getenv('RANK', -1))
|
||||
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
|
||||
GIT_INFO = check_git_info()
|
||||
|
||||
|
||||
def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary
|
||||
save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \
|
||||
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
|
||||
opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio
|
||||
# callbacks.run('on_pretrain_routine_start')
|
||||
|
||||
# Directories
|
||||
w = save_dir / 'weights' # weights dir
|
||||
(w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir
|
||||
last, best = w / 'last.pt', w / 'best.pt'
|
||||
|
||||
# Hyperparameters
|
||||
if isinstance(hyp, str):
|
||||
with open(hyp, errors='ignore') as f:
|
||||
hyp = yaml.safe_load(f) # load hyps dict
|
||||
LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
|
||||
opt.hyp = hyp.copy() # for saving hyps to checkpoints
|
||||
|
||||
# Save run settings
|
||||
if not evolve:
|
||||
yaml_save(save_dir / 'hyp.yaml', hyp)
|
||||
yaml_save(save_dir / 'opt.yaml', vars(opt))
|
||||
|
||||
# Loggers
|
||||
data_dict = None
|
||||
if RANK in {-1, 0}:
|
||||
logger = GenericLogger(opt=opt, console_logger=LOGGER)
|
||||
|
||||
# Config
|
||||
plots = not evolve and not opt.noplots # create plots
|
||||
overlap = not opt.no_overlap
|
||||
cuda = device.type != 'cpu'
|
||||
init_seeds(opt.seed + 1 + RANK, deterministic=True)
|
||||
with torch_distributed_zero_first(LOCAL_RANK):
|
||||
data_dict = data_dict or check_dataset(data) # check if None
|
||||
train_path, val_path = data_dict['train'], data_dict['val']
|
||||
nc = 1 if single_cls else int(data_dict['nc']) # number of classes
|
||||
names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
|
||||
is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset
|
||||
|
||||
# Model
|
||||
check_suffix(weights, '.pt') # check weights
|
||||
pretrained = weights.endswith('.pt')
|
||||
if pretrained:
|
||||
with torch_distributed_zero_first(LOCAL_RANK):
|
||||
weights = attempt_download(weights) # download if not found locally
|
||||
ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak
|
||||
model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)
|
||||
exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys
|
||||
csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
|
||||
csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect
|
||||
model.load_state_dict(csd, strict=False) # load
|
||||
LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report
|
||||
else:
|
||||
model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
|
||||
amp = check_amp(model) # check AMP
|
||||
|
||||
# Freeze
|
||||
freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze
|
||||
for k, v in model.named_parameters():
|
||||
v.requires_grad = True # train all layers
|
||||
# v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results)
|
||||
if any(x in k for x in freeze):
|
||||
LOGGER.info(f'freezing {k}')
|
||||
v.requires_grad = False
|
||||
|
||||
# Image size
|
||||
gs = max(int(model.stride.max()), 32) # grid size (max stride)
|
||||
imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
|
||||
|
||||
# Batch size
|
||||
if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size
|
||||
batch_size = check_train_batch_size(model, imgsz, amp)
|
||||
logger.update_params({'batch_size': batch_size})
|
||||
# loggers.on_params_update({"batch_size": batch_size})
|
||||
|
||||
# Optimizer
|
||||
nbs = 64 # nominal batch size
|
||||
accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
|
||||
hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
|
||||
optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
|
||||
|
||||
# Scheduler
|
||||
if opt.cos_lr:
|
||||
lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
|
||||
else:
|
||||
lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
|
||||
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs)
|
||||
|
||||
# EMA
|
||||
ema = ModelEMA(model) if RANK in {-1, 0} else None
|
||||
|
||||
# Resume
|
||||
best_fitness, start_epoch = 0.0, 0
|
||||
if pretrained:
|
||||
if resume:
|
||||
best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume)
|
||||
del ckpt, csd
|
||||
|
||||
# DP mode
|
||||
if cuda and RANK == -1 and torch.cuda.device_count() > 1:
|
||||
LOGGER.warning(
|
||||
'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
|
||||
'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.'
|
||||
)
|
||||
model = torch.nn.DataParallel(model)
|
||||
|
||||
# SyncBatchNorm
|
||||
if opt.sync_bn and cuda and RANK != -1:
|
||||
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
|
||||
LOGGER.info('Using SyncBatchNorm()')
|
||||
|
||||
# Trainloader
|
||||
train_loader, dataset = create_dataloader(
|
||||
train_path,
|
||||
imgsz,
|
||||
batch_size // WORLD_SIZE,
|
||||
gs,
|
||||
single_cls,
|
||||
hyp=hyp,
|
||||
augment=True,
|
||||
cache=None if opt.cache == 'val' else opt.cache,
|
||||
rect=opt.rect,
|
||||
rank=LOCAL_RANK,
|
||||
workers=workers,
|
||||
image_weights=opt.image_weights,
|
||||
quad=opt.quad,
|
||||
prefix=colorstr('train: '),
|
||||
shuffle=True,
|
||||
mask_downsample_ratio=mask_ratio,
|
||||
overlap_mask=overlap,
|
||||
)
|
||||
labels = np.concatenate(dataset.labels, 0)
|
||||
mlc = int(labels[:, 0].max()) # max label class
|
||||
assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
|
||||
|
||||
# Process 0
|
||||
if RANK in {-1, 0}:
|
||||
val_loader = create_dataloader(val_path,
|
||||
imgsz,
|
||||
batch_size // WORLD_SIZE * 2,
|
||||
gs,
|
||||
single_cls,
|
||||
hyp=hyp,
|
||||
cache=None if noval else opt.cache,
|
||||
rect=True,
|
||||
rank=-1,
|
||||
workers=workers * 2,
|
||||
pad=0.5,
|
||||
mask_downsample_ratio=mask_ratio,
|
||||
overlap_mask=overlap,
|
||||
prefix=colorstr('val: '))[0]
|
||||
|
||||
if not resume:
|
||||
if not opt.noautoanchor:
|
||||
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor
|
||||
model.half().float() # pre-reduce anchor precision
|
||||
|
||||
if plots:
|
||||
plot_labels(labels, names, save_dir)
|
||||
# callbacks.run('on_pretrain_routine_end', labels, names)
|
||||
|
||||
# DDP mode
|
||||
if cuda and RANK != -1:
|
||||
model = smart_DDP(model)
|
||||
|
||||
# Model attributes
|
||||
nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
|
||||
hyp['box'] *= 3 / nl # scale to layers
|
||||
hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers
|
||||
hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
|
||||
hyp['label_smoothing'] = opt.label_smoothing
|
||||
model.nc = nc # attach number of classes to model
|
||||
model.hyp = hyp # attach hyperparameters to model
|
||||
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
|
||||
model.names = names
|
||||
|
||||
# Start training
|
||||
t0 = time.time()
|
||||
nb = len(train_loader) # number of batches
|
||||
nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
|
||||
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
|
||||
last_opt_step = -1
|
||||
maps = np.zeros(nc) # mAP per class
|
||||
results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
|
||||
scheduler.last_epoch = start_epoch - 1 # do not move
|
||||
scaler = torch.cuda.amp.GradScaler(enabled=amp)
|
||||
stopper, stop = EarlyStopping(patience=opt.patience), False
|
||||
compute_loss = ComputeLoss(model, overlap=overlap) # init loss class
|
||||
# callbacks.run('on_train_start')
|
||||
LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
|
||||
f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
|
||||
f"Logging results to {colorstr('bold', save_dir)}\n"
|
||||
f'Starting training for {epochs} epochs...')
|
||||
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
|
||||
# callbacks.run('on_train_epoch_start')
|
||||
model.train()
|
||||
|
||||
# Update image weights (optional, single-GPU only)
|
||||
if opt.image_weights:
|
||||
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
|
||||
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
|
||||
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
|
||||
|
||||
# Update mosaic border (optional)
|
||||
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
|
||||
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
|
||||
|
||||
mloss = torch.zeros(4, device=device) # mean losses
|
||||
if RANK != -1:
|
||||
train_loader.sampler.set_epoch(epoch)
|
||||
pbar = enumerate(train_loader)
|
||||
LOGGER.info(('\n' + '%11s' * 8) %
|
||||
('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size'))
|
||||
if RANK in {-1, 0}:
|
||||
pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar
|
||||
optimizer.zero_grad()
|
||||
for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------
|
||||
# callbacks.run('on_train_batch_start')
|
||||
ni = i + nb * epoch # number integrated batches (since train start)
|
||||
imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0
|
||||
|
||||
# Warmup
|
||||
if ni <= nw:
|
||||
xi = [0, nw] # x interp
|
||||
# compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
|
||||
accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
|
||||
for j, x in enumerate(optimizer.param_groups):
|
||||
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
|
||||
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
|
||||
if 'momentum' in x:
|
||||
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
|
||||
|
||||
# Multi-scale
|
||||
if opt.multi_scale:
|
||||
sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size
|
||||
sf = sz / max(imgs.shape[2:]) # scale factor
|
||||
if sf != 1:
|
||||
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
|
||||
imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
|
||||
|
||||
# Forward
|
||||
with torch.cuda.amp.autocast(amp):
|
||||
pred = model(imgs) # forward
|
||||
loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
|
||||
if RANK != -1:
|
||||
loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
|
||||
if opt.quad:
|
||||
loss *= 4.
|
||||
|
||||
# Backward
|
||||
scaler.scale(loss).backward()
|
||||
|
||||
# Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
|
||||
if ni - last_opt_step >= accumulate:
|
||||
scaler.unscale_(optimizer) # unscale gradients
|
||||
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients
|
||||
scaler.step(optimizer) # optimizer.step
|
||||
scaler.update()
|
||||
optimizer.zero_grad()
|
||||
if ema:
|
||||
ema.update(model)
|
||||
last_opt_step = ni
|
||||
|
||||
# Log
|
||||
if RANK in {-1, 0}:
|
||||
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
|
||||
mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB)
|
||||
pbar.set_description(('%11s' * 2 + '%11.4g' * 6) %
|
||||
(f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
|
||||
# callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths)
|
||||
# if callbacks.stop_training:
|
||||
# return
|
||||
|
||||
# Mosaic plots
|
||||
if plots:
|
||||
if ni < 3:
|
||||
plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg')
|
||||
if ni == 10:
|
||||
files = sorted(save_dir.glob('train*.jpg'))
|
||||
logger.log_images(files, 'Mosaics', epoch)
|
||||
# end batch ------------------------------------------------------------------------------------------------
|
||||
|
||||
# Scheduler
|
||||
lr = [x['lr'] for x in optimizer.param_groups] # for loggers
|
||||
scheduler.step()
|
||||
|
||||
if RANK in {-1, 0}:
|
||||
# mAP
|
||||
# callbacks.run('on_train_epoch_end', epoch=epoch)
|
||||
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
|
||||
final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
|
||||
if not noval or final_epoch: # Calculate mAP
|
||||
results, maps, _ = validate.run(data_dict,
|
||||
batch_size=batch_size // WORLD_SIZE * 2,
|
||||
imgsz=imgsz,
|
||||
half=amp,
|
||||
model=ema.ema,
|
||||
single_cls=single_cls,
|
||||
dataloader=val_loader,
|
||||
save_dir=save_dir,
|
||||
plots=False,
|
||||
callbacks=callbacks,
|
||||
compute_loss=compute_loss,
|
||||
mask_downsample_ratio=mask_ratio,
|
||||
overlap=overlap)
|
||||
|
||||
# Update best mAP
|
||||
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
|
||||
stop = stopper(epoch=epoch, fitness=fi) # early stop check
|
||||
if fi > best_fitness:
|
||||
best_fitness = fi
|
||||
log_vals = list(mloss) + list(results) + lr
|
||||
# callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi)
|
||||
# Log val metrics and media
|
||||
metrics_dict = dict(zip(KEYS, log_vals))
|
||||
logger.log_metrics(metrics_dict, epoch)
|
||||
|
||||
# Save model
|
||||
if (not nosave) or (final_epoch and not evolve): # if save
|
||||
ckpt = {
|
||||
'epoch': epoch,
|
||||
'best_fitness': best_fitness,
|
||||
'model': deepcopy(de_parallel(model)).half(),
|
||||
'ema': deepcopy(ema.ema).half(),
|
||||
'updates': ema.updates,
|
||||
'optimizer': optimizer.state_dict(),
|
||||
'opt': vars(opt),
|
||||
'git': GIT_INFO, # {remote, branch, commit} if a git repo
|
||||
'date': datetime.now().isoformat()}
|
||||
|
||||
# Save last, best and delete
|
||||
torch.save(ckpt, last)
|
||||
if best_fitness == fi:
|
||||
torch.save(ckpt, best)
|
||||
if opt.save_period > 0 and epoch % opt.save_period == 0:
|
||||
torch.save(ckpt, w / f'epoch{epoch}.pt')
|
||||
logger.log_model(w / f'epoch{epoch}.pt')
|
||||
del ckpt
|
||||
# callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)
|
||||
|
||||
# EarlyStopping
|
||||
if RANK != -1: # if DDP training
|
||||
broadcast_list = [stop if RANK == 0 else None]
|
||||
dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks
|
||||
if RANK != 0:
|
||||
stop = broadcast_list[0]
|
||||
if stop:
|
||||
break # must break all DDP ranks
|
||||
|
||||
# end epoch ----------------------------------------------------------------------------------------------------
|
||||
# end training -----------------------------------------------------------------------------------------------------
|
||||
if RANK in {-1, 0}:
|
||||
LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
|
||||
for f in last, best:
|
||||
if f.exists():
|
||||
strip_optimizer(f) # strip optimizers
|
||||
if f is best:
|
||||
LOGGER.info(f'\nValidating {f}...')
|
||||
results, _, _ = validate.run(
|
||||
data_dict,
|
||||
batch_size=batch_size // WORLD_SIZE * 2,
|
||||
imgsz=imgsz,
|
||||
model=attempt_load(f, device).half(),
|
||||
iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65
|
||||
single_cls=single_cls,
|
||||
dataloader=val_loader,
|
||||
save_dir=save_dir,
|
||||
save_json=is_coco,
|
||||
verbose=True,
|
||||
plots=plots,
|
||||
callbacks=callbacks,
|
||||
compute_loss=compute_loss,
|
||||
mask_downsample_ratio=mask_ratio,
|
||||
overlap=overlap) # val best model with plots
|
||||
if is_coco:
|
||||
# callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
|
||||
metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr))
|
||||
logger.log_metrics(metrics_dict, epoch)
|
||||
|
||||
# callbacks.run('on_train_end', last, best, epoch, results)
|
||||
# on train end callback using genericLogger
|
||||
logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs)
|
||||
if not opt.evolve:
|
||||
logger.log_model(best, epoch)
|
||||
if plots:
|
||||
plot_results_with_masks(file=save_dir / 'results.csv') # save results.png
|
||||
files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
|
||||
files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter
|
||||
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
|
||||
logger.log_images(files, 'Results', epoch + 1)
|
||||
logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1)
|
||||
torch.cuda.empty_cache()
|
||||
return results
|
||||
|
||||
|
||||
def parse_opt(known=False):
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path')
|
||||
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
|
||||
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
|
||||
parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
|
||||
parser.add_argument('--epochs', type=int, default=100, help='total training epochs')
|
||||
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
|
||||
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
|
||||
parser.add_argument('--rect', action='store_true', help='rectangular training')
|
||||
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
|
||||
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
|
||||
parser.add_argument('--noval', action='store_true', help='only validate final epoch')
|
||||
parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
|
||||
parser.add_argument('--noplots', action='store_true', help='save no plot files')
|
||||
parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
|
||||
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
|
||||
parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
|
||||
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
|
||||
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
||||
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
|
||||
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
|
||||
parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
|
||||
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
|
||||
parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
|
||||
parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name')
|
||||
parser.add_argument('--name', default='exp', help='save to project/name')
|
||||
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
||||
parser.add_argument('--quad', action='store_true', help='quad dataloader')
|
||||
parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
|
||||
parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
|
||||
parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
|
||||
parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
|
||||
parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
|
||||
parser.add_argument('--seed', type=int, default=0, help='Global training seed')
|
||||
parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
|
||||
|
||||
# Instance Segmentation Args
|
||||
parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample ratio applied to ground-truth masks (saves memory)')
|
||||
parser.add_argument('--no-overlap', action='store_true', help='Disable overlapping masks (overlapping masks train faster at slightly lower mAP)')
|
||||
|
||||
return parser.parse_known_args()[0] if known else parser.parse_args()
|
||||
|
||||
|
||||
def main(opt, callbacks=Callbacks()):
|
||||
# Checks
|
||||
if RANK in {-1, 0}:
|
||||
print_args(vars(opt))
|
||||
check_git_status()
|
||||
check_requirements(ROOT / 'requirements.txt')
|
||||
|
||||
# Resume
|
||||
if opt.resume and not opt.evolve: # resume from specified or most recent last.pt
|
||||
last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
|
||||
opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml
|
||||
opt_data = opt.data # original dataset
|
||||
if opt_yaml.is_file():
|
||||
with open(opt_yaml, errors='ignore') as f:
|
||||
d = yaml.safe_load(f)
|
||||
else:
|
||||
d = torch.load(last, map_location='cpu')['opt']
|
||||
opt = argparse.Namespace(**d) # replace
|
||||
opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate
|
||||
if is_url(opt_data):
|
||||
opt.data = check_file(opt_data) # avoid HUB resume auth timeout
|
||||
else:
|
||||
opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
|
||||
check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks
|
||||
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
|
||||
if opt.evolve:
|
||||
if opt.project == str(ROOT / 'runs/train-seg'): # if default project name, rename to runs/evolve-seg
|
||||
opt.project = str(ROOT / 'runs/evolve-seg')
|
||||
opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
|
||||
if opt.name == 'cfg':
|
||||
opt.name = Path(opt.cfg).stem # use model.yaml as name
|
||||
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
|
||||
|
||||
# DDP mode
|
||||
device = select_device(opt.device, batch_size=opt.batch_size)
|
||||
if LOCAL_RANK != -1:
|
||||
msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
|
||||
assert not opt.image_weights, f'--image-weights {msg}'
|
||||
assert not opt.evolve, f'--evolve {msg}'
|
||||
assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
|
||||
assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
|
||||
assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
|
||||
torch.cuda.set_device(LOCAL_RANK)
|
||||
device = torch.device('cuda', LOCAL_RANK)
|
||||
dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')
|
||||
|
||||
# Train
|
||||
if not opt.evolve:
|
||||
train(opt.hyp, opt, device, callbacks)
|
||||
|
||||
# Evolve hyperparameters (optional)
|
||||
else:
|
||||
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
|
||||
meta = {
|
||||
'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
|
||||
'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
|
||||
'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
|
||||
'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
|
||||
'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
|
||||
'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
|
||||
'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
|
||||
'box': (1, 0.02, 0.2), # box loss gain
|
||||
'cls': (1, 0.2, 4.0), # cls loss gain
|
||||
'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
|
||||
'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
|
||||
'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
|
||||
'iou_t': (0, 0.1, 0.7), # IoU training threshold
|
||||
'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
|
||||
'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
|
||||
'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
|
||||
'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
|
||||
'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
|
||||
'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
|
||||
'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
|
||||
'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
|
||||
'scale': (1, 0.0, 0.9), # image scale (+/- gain)
|
||||
'shear': (1, 0.0, 10.0), # image shear (+/- deg)
|
||||
'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
|
||||
'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
|
||||
'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
|
||||
'mosaic': (1, 0.0, 1.0), # image mosaic (probability)
|
||||
'mixup': (1, 0.0, 1.0), # image mixup (probability)
|
||||
'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability)
|
||||
|
||||
with open(opt.hyp, errors='ignore') as f:
|
||||
hyp = yaml.safe_load(f) # load hyps dict
|
||||
if 'anchors' not in hyp: # anchors commented in hyp.yaml
|
||||
hyp['anchors'] = 3
|
||||
if opt.noautoanchor:
|
||||
del hyp['anchors'], meta['anchors']
|
||||
opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
|
||||
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
|
||||
evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
|
||||
if opt.bucket:
|
||||
# download evolve.csv if exists
|
||||
subprocess.run([
|
||||
'gsutil',
|
||||
'cp',
|
||||
f'gs://{opt.bucket}/evolve.csv',
|
||||
str(evolve_csv),])
|
||||
|
||||
for _ in range(opt.evolve): # generations to evolve
|
||||
if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
|
||||
# Select parent(s)
|
||||
parent = 'single' # parent selection method: 'single' or 'weighted'
|
||||
x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
|
||||
n = min(5, len(x)) # number of previous results to consider
|
||||
x = x[np.argsort(-fitness(x))][:n] # top n mutations
|
||||
w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0)
|
||||
if parent == 'single' or len(x) == 1:
|
||||
# x = x[random.randint(0, n - 1)] # random selection
|
||||
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
|
||||
elif parent == 'weighted':
|
||||
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
|
||||
|
||||
# Mutate
|
||||
mp, s = 0.8, 0.2 # mutation probability, sigma
|
||||
npr = np.random
|
||||
npr.seed(int(time.time()))
|
||||
g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1
|
||||
ng = len(meta)
|
||||
v = np.ones(ng)
|
||||
while all(v == 1): # mutate until a change occurs (prevent duplicates)
|
||||
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
|
||||
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
|
||||
hyp[k] = float(x[i + 12] * v[i]) # mutate
|
||||
|
||||
# Constrain to limits
|
||||
for k, v in meta.items():
|
||||
hyp[k] = max(hyp[k], v[1]) # lower limit
|
||||
hyp[k] = min(hyp[k], v[2]) # upper limit
|
||||
hyp[k] = round(hyp[k], 5) # significant digits
|
||||
|
||||
# Train mutation
|
||||
results = train(hyp.copy(), opt, device, callbacks)
|
||||
callbacks = Callbacks()
|
||||
# Write mutation results
|
||||
print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket)
|
||||
|
||||
# Plot results
|
||||
plot_evolve(evolve_csv)
|
||||
LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
|
||||
f"Results saved to {colorstr('bold', save_dir)}\n"
|
||||
f'Usage example: $ python train.py --hyp {evolve_yaml}')
|
||||
|
||||
|
||||
def run(**kwargs):
|
||||
# Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
|
||||
opt = parse_opt(True)
|
||||
for k, v in kwargs.items():
|
||||
setattr(opt, k, v)
|
||||
main(opt)
|
||||
return opt
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
opt = parse_opt()
|
||||
main(opt)
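Like predict.py, segment/train.py can be driven from Python through its run(**kwargs) helper (defined near the end of the file), which forwards keyword arguments onto the parse_opt() namespace. A minimal sketch for this project's single-class setup; the dataset yaml name here is an assumption:

# Sketch only: programmatic training via run(); the data yaml path is an assumption.
import segment.train as train

train.run(
    data='ayf_ocr.yaml',       # assumed dataset yaml pointing at the images/ and labels/ splits
    weights='yolov5s-seg.pt',  # pretrained segmentation weights, downloaded automatically
    imgsz=640,
    epochs=100,
    batch_size=16,
)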
ayf_ocr/yolo_databases/api/segment/tutorial.ipynb (new file, 595 lines)
@ -0,0 +1,595 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "t6MPjfT5NrKQ"
|
||||
},
|
||||
"source": [
|
||||
"<div align=\"center\">\n",
|
||||
"\n",
|
||||
" <a href=\"https://ultralytics.com/yolov5\" target=\"_blank\">\n",
|
||||
" <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov5/v70/splash.png\"></a>\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"<br>\n",
|
||||
" <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a>\n",
|
||||
" <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/segment/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
|
||||
" <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
|
||||
"<br>\n",
|
||||
"\n",
|
||||
"This <a href=\"https://github.com/ultralytics/yolov5\">YOLOv5</a> 🚀 notebook by <a href=\"https://ultralytics.com\">Ultralytics</a> presents simple train, validate and predict examples to help start your AI adventure.<br>See <a href=\"https://github.com/ultralytics/yolov5/issues/new/choose\">GitHub</a> for community support or <a href=\"https://ultralytics.com/contact\">contact us</a> for professional support.\n",
|
||||
"\n",
|
||||
"</div>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "7mGmQbAO5pQb"
|
||||
},
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"\n",
|
||||
"Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "wbvMlHd_QwMG",
|
||||
"outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stderr",
|
||||
"text": [
|
||||
"YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!git clone https://github.com/ultralytics/yolov5 # clone\n",
|
||||
"%cd yolov5\n",
|
||||
"%pip install -qr requirements.txt # install\n",
|
||||
"\n",
|
||||
"import torch\n",
|
||||
"import utils\n",
|
||||
"display = utils.notebook_init() # checks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "4JnkELT0cIJg"
|
||||
},
|
||||
"source": [
|
||||
"# 1. Predict\n",
|
||||
"\n",
|
||||
"`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n",
|
||||
"\n",
|
||||
"```shell\n",
|
||||
"python segment/predict.py --source 0 # webcam\n",
|
||||
" img.jpg # image \n",
|
||||
" vid.mp4 # video\n",
|
||||
" screen # screenshot\n",
|
||||
" path/ # directory\n",
|
||||
" 'path/*.jpg' # glob\n",
|
||||
" 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n",
|
||||
" 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "zR9ZbuQCH7FX",
|
||||
"outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n",
|
||||
"YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
|
||||
"\n",
|
||||
"Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n",
|
||||
"100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n",
|
||||
"\n",
|
||||
"Fusing layers... \n",
|
||||
"YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
|
||||
"image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n",
|
||||
"image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n",
|
||||
"Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n",
|
||||
"Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n",
|
||||
"#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "hkAzDWJ7cWTr"
|
||||
},
|
||||
"source": [
|
||||
" \n",
|
||||
"<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/199030123-08c72f8d-6871-4116-8ed3-c373642cf28e.jpg\" width=\"600\">"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "0eq1SMWl6Sfn"
|
||||
},
|
||||
"source": [
|
||||
"# 2. Validate\n",
|
||||
"Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "WQPtK1QYVaD_",
|
||||
"outputId": "9d751d8c-bee8-4339-cf30-9854ca530449"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip ...\n",
|
||||
"Downloading http://images.cocodataset.org/zips/val2017.zip ...\n",
|
||||
"######################################################################## 100.0%\n",
|
||||
"######################################################################## 100.0%\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Download COCO val\n",
|
||||
"!bash data/scripts/get_coco.sh --val --segments # download (780M - 5000 images)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "X58w8JLpMnjH",
|
||||
"outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n",
|
||||
"YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
|
||||
"\n",
|
||||
"Fusing layers... \n",
|
||||
"YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
|
||||
"\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n",
|
||||
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n",
|
||||
" Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n",
|
||||
" all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n",
|
||||
"Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n",
|
||||
"Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Validate YOLOv5s-seg on COCO val\n",
|
||||
"!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "ZY2VXXXu74w5"
|
||||
},
|
||||
"source": [
|
||||
"# 3. Train\n",
|
||||
"\n",
|
||||
"<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"1000\" src=\"https://github.com/ultralytics/assets/raw/main/im/integrations-loop.png\"/></a></p>\n",
|
||||
"Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n",
|
||||
"<br><br>\n",
|
||||
"\n",
|
||||
"Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n",
|
||||
"\n",
|
||||
"- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n",
|
||||
"automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n",
|
||||
"- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n",
|
||||
"- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n",
|
||||
"<br><br>\n",
|
||||
"\n",
|
||||
"A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n",
|
||||
"\n",
|
||||
"## Train on Custom Data with Roboflow 🌟 NEW\n",
|
||||
"\n",
|
||||
"[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n",
|
||||
"\n",
|
||||
"- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n",
|
||||
"- Custom Training Notebook: [](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n",
|
||||
"<br>\n",
|
||||
"\n",
|
||||
"<p align=\"\"><a href=\"https://roboflow.com/?ref=ultralytics\"><img width=\"480\" src=\"https://robflow-public-assets.s3.amazonaws.com/how-to-train-yolov5-segmentation-annotation.gif\"/></a></p>Label images lightning fast (including with model-assisted labeling)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "i3oKtE4g-aNn"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#@title Select YOLOv5 🚀 logger {run: 'auto'}\n",
|
||||
"logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n",
|
||||
"\n",
|
||||
"if logger == 'Comet':\n",
|
||||
" %pip install -q comet_ml\n",
|
||||
" import comet_ml; comet_ml.init()\n",
|
||||
"elif logger == 'ClearML':\n",
|
||||
" %pip install -q clearml\n",
|
||||
" import clearml; clearml.browser_login()\n",
|
||||
"elif logger == 'TensorBoard':\n",
|
||||
" %load_ext tensorboard\n",
|
||||
" %tensorboard --logdir runs/train"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
},
|
||||
"id": "1NcFxRcFdJ_O",
|
||||
"outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n",
|
||||
"\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
|
||||
"YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n",
|
||||
"\n",
|
||||
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
|
||||
"\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n",
|
||||
"\n",
|
||||
"Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n",
|
||||
"Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n",
|
||||
"100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n",
|
||||
"Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n",
|
||||
"\n",
|
||||
" from n params module arguments \n",
|
||||
" 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n",
|
||||
" 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
|
||||
" 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
|
||||
" 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
|
||||
" 4 -1 2 115712 models.common.C3 [128, 128, 2] \n",
|
||||
" 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
|
||||
" 6 -1 3 625152 models.common.C3 [256, 256, 3] \n",
|
||||
" 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
|
||||
" 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n",
|
||||
" 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n",
|
||||
" 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
|
||||
" 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
|
||||
" 12 [-1, 6] 1 0 models.common.Concat [1] \n",
|
||||
" 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n",
|
||||
" 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
|
||||
" 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
|
||||
" 16 [-1, 4] 1 0 models.common.Concat [1] \n",
|
||||
" 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n",
|
||||
" 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
|
||||
" 19 [-1, 14] 1 0 models.common.Concat [1] \n",
|
||||
" 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n",
|
||||
" 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
|
||||
" 22 [-1, 10] 1 0 models.common.Concat [1] \n",
|
||||
" 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
|
||||
" 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n",
|
||||
"Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n",
|
||||
"\n",
|
||||
"Transferred 367/367 items from yolov5s-seg.pt\n",
|
||||
"\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
|
||||
"\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n",
|
||||
"\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
|
||||
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n",
|
||||
"\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n",
|
||||
"\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n",
|
||||
"\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<?, ?it/s]\n",
|
||||
"\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 98.90it/s]\n",
|
||||
"\n",
|
||||
"\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n",
|
||||
"Plotting labels to runs/train-seg/exp/labels.jpg... \n",
|
||||
"Image sizes 640 train, 640 val\n",
|
||||
"Using 2 dataloader workers\n",
|
||||
"Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n",
|
||||
"Starting training for 3 epochs...\n",
|
||||
"\n",
|
||||
" Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n",
|
||||
" 0/2 4.92G 0.0417 0.04646 0.06066 0.02126 192 640: 100% 8/8 [00:08<00:00, 1.10s/it]\n",
|
||||
" Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.81it/s]\n",
|
||||
" all 128 929 0.737 0.649 0.715 0.492 0.719 0.617 0.658 0.408\n",
|
||||
"\n",
|
||||
" Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n",
|
||||
" 1/2 6.29G 0.04157 0.04503 0.05772 0.01777 208 640: 100% 8/8 [00:09<00:00, 1.21s/it]\n",
|
||||
" Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.87it/s]\n",
|
||||
" all 128 929 0.756 0.674 0.738 0.506 0.725 0.64 0.68 0.422\n",
|
||||
"\n",
|
||||
" Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n",
|
||||
" 2/2 6.29G 0.0425 0.04793 0.06784 0.01863 161 640: 100% 8/8 [00:03<00:00, 2.02it/s]\n",
|
||||
" Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:02<00:00, 1.88it/s]\n",
|
||||
" all 128 929 0.736 0.694 0.747 0.522 0.769 0.622 0.683 0.427\n",
|
||||
"\n",
|
||||
"3 epochs completed in 0.009 hours.\n",
|
||||
"Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n",
|
||||
"Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n",
|
||||
"\n",
|
||||
"Validating runs/train-seg/exp/weights/best.pt...\n",
|
||||
"Fusing layers... \n",
|
||||
"Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n",
|
||||
" Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 4/4 [00:06<00:00, 1.59s/it]\n",
|
||||
" all 128 929 0.738 0.694 0.746 0.522 0.759 0.625 0.682 0.426\n",
|
||||
" person 128 254 0.845 0.756 0.836 0.55 0.861 0.669 0.759 0.407\n",
|
||||
" bicycle 128 6 0.475 0.333 0.549 0.341 0.711 0.333 0.526 0.322\n",
|
||||
" car 128 46 0.612 0.565 0.539 0.257 0.555 0.435 0.477 0.171\n",
|
||||
" motorcycle 128 5 0.73 0.8 0.752 0.571 0.747 0.8 0.752 0.42\n",
|
||||
" airplane 128 6 1 0.943 0.995 0.732 0.92 0.833 0.839 0.555\n",
|
||||
" bus 128 7 0.677 0.714 0.722 0.653 0.711 0.714 0.722 0.593\n",
|
||||
" train 128 3 1 0.951 0.995 0.551 1 0.884 0.995 0.781\n",
|
||||
" truck 128 12 0.555 0.417 0.457 0.285 0.624 0.417 0.397 0.277\n",
|
||||
" boat 128 6 0.624 0.5 0.584 0.186 1 0.326 0.412 0.133\n",
|
||||
" traffic light 128 14 0.513 0.302 0.411 0.247 0.435 0.214 0.376 0.251\n",
|
||||
" stop sign 128 2 0.824 1 0.995 0.796 0.906 1 0.995 0.747\n",
|
||||
" bench 128 9 0.75 0.667 0.763 0.367 0.724 0.585 0.698 0.209\n",
|
||||
" bird 128 16 0.961 1 0.995 0.686 0.918 0.938 0.91 0.525\n",
|
||||
" cat 128 4 0.771 0.857 0.945 0.752 0.76 0.8 0.945 0.728\n",
|
||||
" dog 128 9 0.987 0.778 0.963 0.681 1 0.705 0.89 0.574\n",
|
||||
" horse 128 2 0.703 1 0.995 0.697 0.759 1 0.995 0.249\n",
|
||||
" elephant 128 17 0.916 0.882 0.93 0.691 0.811 0.765 0.829 0.537\n",
|
||||
" bear 128 1 0.664 1 0.995 0.995 0.701 1 0.995 0.895\n",
|
||||
" zebra 128 4 0.864 1 0.995 0.921 0.879 1 0.995 0.804\n",
|
||||
" giraffe 128 9 0.883 0.889 0.94 0.683 0.845 0.778 0.78 0.463\n",
|
||||
" backpack 128 6 1 0.59 0.701 0.372 1 0.474 0.52 0.252\n",
|
||||
" umbrella 128 18 0.654 0.839 0.887 0.52 0.517 0.556 0.427 0.229\n",
|
||||
" handbag 128 19 0.54 0.211 0.408 0.221 0.796 0.206 0.396 0.196\n",
|
||||
" tie 128 7 0.864 0.857 0.857 0.577 0.925 0.857 0.857 0.534\n",
|
||||
" suitcase 128 4 0.716 1 0.945 0.647 0.767 1 0.945 0.634\n",
|
||||
" frisbee 128 5 0.708 0.8 0.761 0.643 0.737 0.8 0.761 0.501\n",
|
||||
" skis 128 1 0.691 1 0.995 0.796 0.761 1 0.995 0.199\n",
|
||||
" snowboard 128 7 0.918 0.857 0.904 0.604 0.32 0.286 0.235 0.137\n",
|
||||
" sports ball 128 6 0.902 0.667 0.701 0.466 0.727 0.5 0.497 0.471\n",
|
||||
" kite 128 10 0.586 0.4 0.511 0.231 0.663 0.394 0.417 0.139\n",
|
||||
" baseball bat 128 4 0.359 0.5 0.401 0.169 0.631 0.5 0.526 0.133\n",
|
||||
" baseball glove 128 7 1 0.519 0.58 0.327 0.687 0.286 0.455 0.328\n",
|
||||
" skateboard 128 5 0.729 0.8 0.862 0.631 0.599 0.6 0.604 0.379\n",
|
||||
" tennis racket 128 7 0.57 0.714 0.645 0.448 0.608 0.714 0.645 0.412\n",
|
||||
" bottle 128 18 0.469 0.393 0.537 0.357 0.661 0.389 0.543 0.349\n",
|
||||
" wine glass 128 16 0.677 0.938 0.866 0.441 0.53 0.625 0.67 0.334\n",
|
||||
" cup 128 36 0.777 0.722 0.812 0.466 0.725 0.583 0.762 0.467\n",
|
||||
" fork 128 6 0.948 0.333 0.425 0.27 0.527 0.167 0.18 0.102\n",
|
||||
" knife 128 16 0.757 0.587 0.669 0.458 0.79 0.5 0.552 0.34\n",
|
||||
" spoon 128 22 0.74 0.364 0.559 0.269 0.925 0.364 0.513 0.213\n",
|
||||
" bowl 128 28 0.766 0.714 0.725 0.559 0.803 0.584 0.665 0.353\n",
|
||||
" banana 128 1 0.408 1 0.995 0.398 0.539 1 0.995 0.497\n",
|
||||
" sandwich 128 2 1 0 0.695 0.536 1 0 0.498 0.448\n",
|
||||
" orange 128 4 0.467 1 0.995 0.693 0.518 1 0.995 0.663\n",
|
||||
" broccoli 128 11 0.462 0.455 0.383 0.259 0.548 0.455 0.384 0.256\n",
|
||||
" carrot 128 24 0.631 0.875 0.77 0.533 0.757 0.909 0.853 0.499\n",
|
||||
" hot dog 128 2 0.555 1 0.995 0.995 0.578 1 0.995 0.796\n",
|
||||
" pizza 128 5 0.89 0.8 0.962 0.796 1 0.778 0.962 0.766\n",
|
||||
" donut 128 14 0.695 1 0.893 0.772 0.704 1 0.893 0.696\n",
|
||||
" cake 128 4 0.826 1 0.995 0.92 0.862 1 0.995 0.846\n",
|
||||
" chair 128 35 0.53 0.571 0.613 0.336 0.67 0.6 0.538 0.271\n",
|
||||
" couch 128 6 0.972 0.667 0.833 0.627 1 0.62 0.696 0.394\n",
|
||||
" potted plant 128 14 0.7 0.857 0.883 0.552 0.836 0.857 0.883 0.473\n",
|
||||
" bed 128 3 0.979 0.667 0.83 0.366 1 0 0.83 0.373\n",
|
||||
" dining table 128 13 0.775 0.308 0.505 0.364 0.644 0.231 0.25 0.0804\n",
|
||||
" toilet 128 2 0.836 1 0.995 0.846 0.887 1 0.995 0.797\n",
|
||||
" tv 128 2 0.6 1 0.995 0.846 0.655 1 0.995 0.896\n",
|
||||
" laptop 128 3 0.822 0.333 0.445 0.307 1 0 0.392 0.12\n",
|
||||
" mouse 128 2 1 0 0 0 1 0 0 0\n",
|
||||
" remote 128 8 0.745 0.5 0.62 0.459 0.821 0.5 0.624 0.449\n",
|
||||
" cell phone 128 8 0.686 0.375 0.502 0.272 0.488 0.25 0.28 0.132\n",
|
||||
" microwave 128 3 0.831 1 0.995 0.722 0.867 1 0.995 0.592\n",
|
||||
" oven 128 5 0.439 0.4 0.435 0.294 0.823 0.6 0.645 0.418\n",
|
||||
" sink 128 6 0.677 0.5 0.565 0.448 0.722 0.5 0.46 0.362\n",
|
||||
" refrigerator 128 5 0.533 0.8 0.783 0.524 0.558 0.8 0.783 0.527\n",
|
||||
" book 128 29 0.732 0.379 0.423 0.196 0.69 0.207 0.38 0.131\n",
|
||||
" clock 128 9 0.889 0.778 0.917 0.677 0.908 0.778 0.875 0.604\n",
|
||||
" vase 128 2 0.375 1 0.995 0.995 0.455 1 0.995 0.796\n",
|
||||
" scissors 128 1 1 0 0.0166 0.00166 1 0 0 0\n",
|
||||
" teddy bear 128 21 0.813 0.829 0.841 0.457 0.826 0.678 0.786 0.422\n",
|
||||
" toothbrush 128 5 0.806 1 0.995 0.733 0.991 1 0.995 0.628\n",
|
||||
"Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Train YOLOv5s on COCO128 for 3 epochs\n",
|
||||
"!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "15glLzbQx5u0"
|
||||
},
|
||||
"source": [
|
||||
"# 4. Visualize"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "nWOsI5wJR1o3"
|
||||
},
|
||||
"source": [
|
||||
"## Comet Logging and Visualization 🌟 NEW\n",
|
||||
"\n",
|
||||
"[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
|
||||
"\n",
|
||||
"Getting started is easy:\n",
|
||||
"```shell\n",
|
||||
"pip install comet_ml # 1. install\n",
|
||||
"export COMET_API_KEY=<Your API Key> # 2. paste API key\n",
|
||||
"python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n",
|
||||
"```\n",
|
||||
"To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
|
||||
"[](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
|
||||
"\n",
|
||||
"<a href=\"https://bit.ly/yolov5-readme-comet2\">\n",
|
||||
"<img alt=\"Comet Dashboard\" src=\"https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png\" width=\"1280\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "Lay2WsTjNJzP"
|
||||
},
|
||||
"source": [
|
||||
"## ClearML Logging and Automation 🌟 NEW\n",
|
||||
"\n",
|
||||
"[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n",
|
||||
"\n",
|
||||
"- `pip install clearml`\n",
|
||||
"- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
|
||||
"\n",
|
||||
"You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
|
||||
"\n",
|
||||
"You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n",
|
||||
"\n",
|
||||
"<a href=\"https://cutt.ly/yolov5-notebook-clearml\">\n",
|
||||
"<img alt=\"ClearML Experiment Management UI\" src=\"https://github.com/thepycoder/clearml_screenshots/raw/main/scalars.jpg\" width=\"1280\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "-WPvRbS5Swl6"
|
||||
},
|
||||
"source": [
|
||||
"## Local Logging\n",
|
||||
"\n",
|
||||
"Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
|
||||
"\n",
|
||||
"This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n",
|
||||
"\n",
|
||||
"<img alt=\"Local logging results\" src=\"https://user-images.githubusercontent.com/26833433/183222430-e1abd1b7-782c-4cde-b04d-ad52926bf818.jpg\" width=\"1280\"/>\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "Zelyeqbyt3GD"
|
||||
},
|
||||
"source": [
|
||||
"# Environments\n",
|
||||
"\n",
|
||||
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
|
||||
"\n",
|
||||
"- **Notebooks** with free GPU: <a href=\"https://bit.ly/yolov5-paperspace-notebook\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"></a> <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
|
||||
"- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
|
||||
"- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
|
||||
"- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "6Qu7Iesl0p54"
|
||||
},
|
||||
"source": [
|
||||
"# Status\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "IEijrePND_2I"
|
||||
},
|
||||
"source": [
|
||||
"# Appendix\n",
|
||||
"\n",
|
||||
"Additional content below."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "GMusP4OAxFu6"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n",
|
||||
"import torch\n",
|
||||
"\n",
|
||||
"model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n",
|
||||
"im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n",
|
||||
"results = model(im) # inference\n",
|
||||
"results.print() # or .show(), .save(), .crop(), .pandas(), etc."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"accelerator": "GPU",
|
||||
"colab": {
|
||||
"name": "YOLOv5 Segmentation Tutorial",
|
||||
"provenance": [],
|
||||
"toc_visible": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
473
ayf_ocr/yolo_databases/api/segment/val.py
Normal file
@ -0,0 +1,473 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
Validate a trained YOLOv5 segment model on a segment dataset
|
||||
|
||||
Usage:
|
||||
$ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images)
|
||||
$ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments
|
||||
|
||||
Usage - formats:
|
||||
$ python segment/val.py --weights yolov5s-seg.pt # PyTorch
|
||||
yolov5s-seg.torchscript # TorchScript
|
||||
yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn
|
||||
yolov5s-seg_openvino_model # OpenVINO
|
||||
yolov5s-seg.engine # TensorRT
|
||||
yolov5s-seg.mlmodel # CoreML (macOS-only)
|
||||
yolov5s-seg_saved_model # TensorFlow SavedModel
|
||||
yolov5s-seg.pb # TensorFlow GraphDef
|
||||
yolov5s-seg.tflite # TensorFlow Lite
|
||||
yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU
|
||||
yolov5s-seg_paddle_model # PaddlePaddle
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from multiprocessing.pool import ThreadPool
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from tqdm import tqdm
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
||||
|
||||
import torch.nn.functional as F
|
||||
|
||||
from models.common import DetectMultiBackend
|
||||
from models.yolo import SegmentationModel
|
||||
from utils.callbacks import Callbacks
|
||||
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
|
||||
check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
|
||||
non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
|
||||
from utils.metrics import ConfusionMatrix, box_iou
|
||||
from utils.plots import output_to_target, plot_val_study
|
||||
from utils.segment.dataloaders import create_dataloader
|
||||
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
|
||||
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
|
||||
from utils.segment.plots import plot_images_and_masks
|
||||
from utils.torch_utils import de_parallel, select_device, smart_inference_mode
|
||||
|
||||
|
||||
def save_one_txt(predn, save_conf, shape, file):
|
||||
# Save one txt result
|
||||
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
|
||||
for *xyxy, conf, cls in predn.tolist():
|
||||
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
|
||||
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
|
||||
with open(file, 'a') as f:
|
||||
f.write(('%g ' * len(line)).rstrip() % line + '\n')
|
||||
|
||||
|
||||
def save_one_json(predn, jdict, path, class_map, pred_masks):
|
||||
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
|
||||
from pycocotools.mask import encode
|
||||
|
||||
def single_encode(x):
|
||||
rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
|
||||
rle['counts'] = rle['counts'].decode('utf-8')
|
||||
return rle
|
||||
|
||||
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
|
||||
box = xyxy2xywh(predn[:, :4]) # xywh
|
||||
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
|
||||
pred_masks = np.transpose(pred_masks, (2, 0, 1))
|
||||
with ThreadPool(NUM_THREADS) as pool:
|
||||
rles = pool.map(single_encode, pred_masks)
|
||||
for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
|
||||
jdict.append({
|
||||
'image_id': image_id,
|
||||
'category_id': class_map[int(p[5])],
|
||||
'bbox': [round(x, 3) for x in b],
|
||||
'score': round(p[4], 5),
|
||||
'segmentation': rles[i]})
|
||||
|
||||
|
||||
def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
|
||||
"""
|
||||
Return correct prediction matrix
|
||||
Arguments:
|
||||
detections (array[N, 6]), x1, y1, x2, y2, conf, class
|
||||
labels (array[M, 5]), class, x1, y1, x2, y2
|
||||
Returns:
|
||||
correct (array[N, 10]), for 10 IoU levels
|
||||
"""
|
||||
if masks:
|
||||
if overlap:
|
||||
nl = len(labels)
|
||||
index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
|
||||
gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640)
|
||||
gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
|
||||
if gt_masks.shape[1:] != pred_masks.shape[1:]:
|
||||
gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
|
||||
gt_masks = gt_masks.gt_(0.5)
|
||||
iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
|
||||
else: # boxes
|
||||
iou = box_iou(labels[:, 1:], detections[:, :4])
|
||||
|
||||
correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
|
||||
correct_class = labels[:, 0:1] == detections[:, 5]
|
||||
for i in range(len(iouv)):
|
||||
x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match
|
||||
if x[0].shape[0]:
|
||||
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou]
|
||||
if x[0].shape[0] > 1:
|
||||
matches = matches[matches[:, 2].argsort()[::-1]]
|
||||
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
|
||||
# matches = matches[matches[:, 2].argsort()[::-1]]
|
||||
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
|
||||
correct[matches[:, 1].astype(int), i] = True
|
||||
return torch.tensor(correct, dtype=torch.bool, device=iouv.device)
|
||||
|
||||
|
||||
@smart_inference_mode()
|
||||
def run(
|
||||
data,
|
||||
weights=None, # model.pt path(s)
|
||||
batch_size=32, # batch size
|
||||
imgsz=640, # inference size (pixels)
|
||||
conf_thres=0.001, # confidence threshold
|
||||
iou_thres=0.6, # NMS IoU threshold
|
||||
max_det=300, # maximum detections per image
|
||||
task='val', # train, val, test, speed or study
|
||||
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
||||
workers=8, # max dataloader workers (per RANK in DDP mode)
|
||||
single_cls=False, # treat as single-class dataset
|
||||
augment=False, # augmented inference
|
||||
verbose=False, # verbose output
|
||||
save_txt=False, # save results to *.txt
|
||||
save_hybrid=False, # save label+prediction hybrid results to *.txt
|
||||
save_conf=False, # save confidences in --save-txt labels
|
||||
save_json=False, # save a COCO-JSON results file
|
||||
project=ROOT / 'runs/val-seg', # save to project/name
|
||||
name='exp', # save to project/name
|
||||
exist_ok=False, # existing project/name ok, do not increment
|
||||
half=True, # use FP16 half-precision inference
|
||||
dnn=False, # use OpenCV DNN for ONNX inference
|
||||
model=None,
|
||||
dataloader=None,
|
||||
save_dir=Path(''),
|
||||
plots=True,
|
||||
overlap=False,
|
||||
mask_downsample_ratio=1,
|
||||
compute_loss=None,
|
||||
callbacks=Callbacks(),
|
||||
):
|
||||
if save_json:
|
||||
check_requirements('pycocotools>=2.0.6')
|
||||
process = process_mask_native # more accurate
|
||||
else:
|
||||
process = process_mask # faster
|
||||
|
||||
# Initialize/load model and set device
|
||||
training = model is not None
|
||||
if training: # called by train.py
|
||||
device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model
|
||||
half &= device.type != 'cpu' # half precision only supported on CUDA
|
||||
model.half() if half else model.float()
|
||||
nm = de_parallel(model).model[-1].nm # number of masks
|
||||
else: # called directly
|
||||
device = select_device(device, batch_size=batch_size)
|
||||
|
||||
# Directories
|
||||
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
|
||||
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
|
||||
|
||||
# Load model
|
||||
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
|
||||
stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
|
||||
imgsz = check_img_size(imgsz, s=stride) # check image size
|
||||
half = model.fp16 # FP16 supported on limited backends with CUDA
|
||||
nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks
|
||||
if engine:
|
||||
batch_size = model.batch_size
|
||||
else:
|
||||
device = model.device
|
||||
if not (pt or jit):
|
||||
batch_size = 1 # export.py models default to batch-size 1
|
||||
LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
|
||||
|
||||
# Data
|
||||
data = check_dataset(data) # check
|
||||
|
||||
# Configure
|
||||
model.eval()
|
||||
cuda = device.type != 'cpu'
|
||||
is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset
|
||||
nc = 1 if single_cls else int(data['nc']) # number of classes
|
||||
iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95
|
||||
niou = iouv.numel()
|
||||
|
||||
# Dataloader
|
||||
if not training:
|
||||
if pt and not single_cls: # check --weights are trained on --data
|
||||
ncm = model.model.nc
|
||||
assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
|
||||
f'classes). Pass correct combination of --weights and --data that are trained together.'
|
||||
model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup
|
||||
pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks
|
||||
task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
|
||||
dataloader = create_dataloader(data[task],
|
||||
imgsz,
|
||||
batch_size,
|
||||
stride,
|
||||
single_cls,
|
||||
pad=pad,
|
||||
rect=rect,
|
||||
workers=workers,
|
||||
prefix=colorstr(f'{task}: '),
|
||||
overlap_mask=overlap,
|
||||
mask_downsample_ratio=mask_downsample_ratio)[0]
|
||||
|
||||
seen = 0
|
||||
confusion_matrix = ConfusionMatrix(nc=nc)
|
||||
names = model.names if hasattr(model, 'names') else model.module.names # get class names
|
||||
if isinstance(names, (list, tuple)): # old format
|
||||
names = dict(enumerate(names))
|
||||
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
|
||||
s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
|
||||
'mAP50', 'mAP50-95)')
|
||||
dt = Profile(), Profile(), Profile()
|
||||
metrics = Metrics()
|
||||
loss = torch.zeros(4, device=device)
|
||||
jdict, stats = [], []
|
||||
# callbacks.run('on_val_start')
|
||||
pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar
|
||||
for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
|
||||
# callbacks.run('on_val_batch_start')
|
||||
with dt[0]:
|
||||
if cuda:
|
||||
im = im.to(device, non_blocking=True)
|
||||
targets = targets.to(device)
|
||||
masks = masks.to(device)
|
||||
masks = masks.float()
|
||||
im = im.half() if half else im.float() # uint8 to fp16/32
|
||||
im /= 255 # 0 - 255 to 0.0 - 1.0
|
||||
nb, _, height, width = im.shape # batch size, channels, height, width
|
||||
|
||||
# Inference
|
||||
with dt[1]:
|
||||
preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)
|
||||
|
||||
# Loss
|
||||
if compute_loss:
|
||||
loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls
|
||||
|
||||
# NMS
|
||||
targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels
|
||||
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
|
||||
with dt[2]:
|
||||
preds = non_max_suppression(preds,
|
||||
conf_thres,
|
||||
iou_thres,
|
||||
labels=lb,
|
||||
multi_label=True,
|
||||
agnostic=single_cls,
|
||||
max_det=max_det,
|
||||
nm=nm)
|
||||
|
||||
# Metrics
|
||||
plot_masks = [] # masks for plotting
|
||||
for si, (pred, proto) in enumerate(zip(preds, protos)):
|
||||
labels = targets[targets[:, 0] == si, 1:]
|
||||
nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions
|
||||
path, shape = Path(paths[si]), shapes[si][0]
|
||||
correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init
|
||||
correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init
|
||||
seen += 1
|
||||
|
||||
if npr == 0:
|
||||
if nl:
|
||||
stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
|
||||
if plots:
|
||||
confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
|
||||
continue
|
||||
|
||||
# Masks
|
||||
midx = [si] if overlap else targets[:, 0] == si
|
||||
gt_masks = masks[midx]
|
||||
pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])
|
||||
|
||||
# Predictions
|
||||
if single_cls:
|
||||
pred[:, 5] = 0
|
||||
predn = pred.clone()
|
||||
scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
|
||||
|
||||
# Evaluate
|
||||
if nl:
|
||||
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
|
||||
scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
|
||||
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
|
||||
correct_bboxes = process_batch(predn, labelsn, iouv)
|
||||
correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
|
||||
if plots:
|
||||
confusion_matrix.process_batch(predn, labelsn)
|
||||
stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls)
|
||||
|
||||
pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
|
||||
if plots and batch_i < 3:
|
||||
plot_masks.append(pred_masks[:15]) # filter top 15 to plot
|
||||
|
||||
# Save/log
|
||||
if save_txt:
|
||||
save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
|
||||
if save_json:
|
||||
pred_masks = scale_image(im[si].shape[1:],
|
||||
pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
|
||||
save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary
|
||||
# callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
|
||||
|
||||
# Plot images
|
||||
if plots and batch_i < 3:
|
||||
if len(plot_masks):
|
||||
plot_masks = torch.cat(plot_masks, dim=0)
|
||||
plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
|
||||
plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths,
|
||||
save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred
|
||||
|
||||
# callbacks.run('on_val_batch_end')
|
||||
|
||||
# Compute metrics
|
||||
stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy
|
||||
if len(stats) and stats[0].any():
|
||||
results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
|
||||
metrics.update(results)
|
||||
nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class
|
||||
|
||||
# Print results
|
||||
pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format
|
||||
LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results()))
|
||||
if nt.sum() == 0:
|
||||
LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels')
|
||||
|
||||
# Print results per class
|
||||
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
|
||||
for i, c in enumerate(metrics.ap_class_index):
|
||||
LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))
|
||||
|
||||
# Print speeds
|
||||
t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
|
||||
if not training:
|
||||
shape = (batch_size, 3, imgsz, imgsz)
|
||||
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
|
||||
|
||||
# Plots
|
||||
if plots:
|
||||
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
|
||||
# callbacks.run('on_val_end')
|
||||
|
||||
mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()
|
||||
|
||||
# Save JSON
|
||||
if save_json and len(jdict):
|
||||
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
|
||||
anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations
|
||||
pred_json = str(save_dir / f'{w}_predictions.json') # predictions
|
||||
LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
|
||||
with open(pred_json, 'w') as f:
|
||||
json.dump(jdict, f)
|
||||
|
||||
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
|
||||
from pycocotools.coco import COCO
|
||||
from pycocotools.cocoeval import COCOeval
|
||||
|
||||
anno = COCO(anno_json) # init annotations api
|
||||
pred = anno.loadRes(pred_json) # init predictions api
|
||||
results = []
|
||||
for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'):
|
||||
if is_coco:
|
||||
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate
|
||||
eval.evaluate()
|
||||
eval.accumulate()
|
||||
eval.summarize()
|
||||
results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5)
|
||||
map_bbox, map50_bbox, map_mask, map50_mask = results
|
||||
except Exception as e:
|
||||
LOGGER.info(f'pycocotools unable to run: {e}')
|
||||
|
||||
# Return results
|
||||
model.float() # for training
|
||||
if not training:
|
||||
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
|
||||
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
|
||||
final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask
|
||||
return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t
|
||||
|
||||
|
||||
def parse_opt():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
|
||||
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)')
|
||||
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
|
||||
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
|
||||
parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
|
||||
parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
|
||||
parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image')
|
||||
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
|
||||
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
|
||||
parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
|
||||
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
|
||||
parser.add_argument('--augment', action='store_true', help='augmented inference')
|
||||
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
|
||||
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
|
||||
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
|
||||
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
|
||||
parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
|
||||
parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name')
|
||||
parser.add_argument('--name', default='exp', help='save to project/name')
|
||||
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
|
||||
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
|
||||
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
|
||||
opt = parser.parse_args()
|
||||
opt.data = check_yaml(opt.data) # check YAML
|
||||
# opt.save_json |= opt.data.endswith('coco.yaml')
|
||||
opt.save_txt |= opt.save_hybrid
|
||||
print_args(vars(opt))
|
||||
return opt
|
||||
|
||||
|
||||
def main(opt):
|
||||
check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
|
||||
|
||||
if opt.task in ('train', 'val', 'test'): # run normally
|
||||
if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
|
||||
LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results')
|
||||
if opt.save_hybrid:
|
||||
LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone')
|
||||
run(**vars(opt))
|
||||
|
||||
else:
|
||||
weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
|
||||
opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results
|
||||
if opt.task == 'speed': # speed benchmarks
|
||||
# python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
|
||||
opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
|
||||
for opt.weights in weights:
|
||||
run(**vars(opt), plots=False)
|
||||
|
||||
elif opt.task == 'study': # speed vs mAP benchmarks
|
||||
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
|
||||
for opt.weights in weights:
|
||||
f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to
|
||||
x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis
|
||||
for opt.imgsz in x: # img-size
|
||||
LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
|
||||
r, _, t = run(**vars(opt), plots=False)
|
||||
y.append(r + t) # results and times
|
||||
np.savetxt(f, y, fmt='%10.4g') # save
|
||||
subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt'])
|
||||
plot_val_study(x=x) # plot
|
||||
else:
|
||||
raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
opt = parse_opt()
|
||||
main(opt)
|
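For completeness, segment/val.py can also be driven from Python instead of the CLI. The sketch below is illustrative only: it assumes the working directory is the repository root so that `segment.val` is importable, and the dataset/weights paths are placeholders rather than files guaranteed to exist.

```python
# Minimal sketch: call segment/val.py programmatically via its run() function.
# Assumes the current working directory is the YOLOv5 repository root.
from segment.val import run

# run() returns (metrics + val losses, per-class mAPs, per-image speeds),
# mirroring what the CLI prints.
results, maps, speeds = run(
    data='data/coco128-seg.yaml',  # dataset YAML (placeholder)
    weights='yolov5s-seg.pt',      # trained segmentation weights (placeholder)
    imgsz=640,
    conf_thres=0.001,
    iou_thres=0.6,
    half=True,                     # FP16 inference on CUDA
)
print(results[:8])  # mp/mr/mAP50/mAP50-95 for boxes, then the same four for masks
```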
12
ayf_ocr/yolo_databases/api/test.py
Normal file
@ -0,0 +1,12 @@
|
||||
import base64
|
||||
import requests
|
||||
|
||||
# Read the sample image and base64-encode it so it can be sent as form data
|
||||
with open('16329967796715117.jpg', 'rb') as f:
|
||||
image = base64.b64encode(f.read())
|
||||
|
||||
data = {
|
||||
'images': image
|
||||
}
|
||||
|
||||
# POST the encoded image to the local inference API and print the JSON response
|
||||
response = requests.post('http://127.0.0.1:8888/', data=data)
|
||||
print(response.json())
|
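test.py above expects an inference API listening on 127.0.0.1:8888 that accepts a form field named `images` containing a base64-encoded JPEG and replies with JSON. The actual server is not shown in this section; purely as a hypothetical illustration of that contract (Flask, the route, and the response keys are assumptions, not the project's real implementation), a matching endpoint could look like:

```python
# Hypothetical server-side counterpart to test.py; framework and response format are assumptions.
import base64

import cv2
import numpy as np
from flask import Flask, request, jsonify

app = Flask(__name__)


@app.route('/', methods=['POST'])
def predict():
    # test.py posts the image as a base64 string in the 'images' form field
    img_bytes = base64.b64decode(request.form['images'])
    img = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_COLOR)
    # ... run the YOLOv5 model on `img` here and collect its predictions ...
    return jsonify({'status': 'ok', 'shape': list(img.shape)})


if __name__ == '__main__':
    app.run(host='127.0.0.1', port=8888)
```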
82
ayf_ocr/yolo_databases/api/utils/__init__.py
Normal file
@ -0,0 +1,82 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
utils/initialization
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
import platform
|
||||
import threading
|
||||
|
||||
|
||||
def emojis(str=''):
|
||||
# Return platform-dependent emoji-safe version of string
|
||||
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
|
||||
|
||||
|
||||
class TryExcept(contextlib.ContextDecorator):
|
||||
# YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
|
||||
def __init__(self, msg=''):
|
||||
self.msg = msg
|
||||
|
||||
def __enter__(self):
|
||||
pass
|
||||
|
||||
def __exit__(self, exc_type, value, traceback):
|
||||
if value:
|
||||
print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
|
||||
return True
|
||||
|
||||
|
||||
def threaded(func):
|
||||
# Multi-threads a target function and returns thread. Usage: @threaded decorator
|
||||
def wrapper(*args, **kwargs):
|
||||
thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
|
||||
thread.start()
|
||||
return thread
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def join_threads(verbose=False):
|
||||
# Join all daemon threads, i.e. atexit.register(lambda: join_threads())
|
||||
main_thread = threading.current_thread()
|
||||
for t in threading.enumerate():
|
||||
if t is not main_thread:
|
||||
if verbose:
|
||||
print(f'Joining thread {t.name}')
|
||||
t.join()
|
||||
|
||||
|
||||
def notebook_init(verbose=True):
|
||||
# Check system software and hardware
|
||||
print('Checking setup...')
|
||||
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from utils.general import check_font, check_requirements, is_colab
|
||||
from utils.torch_utils import select_device # imports
|
||||
|
||||
check_font()
|
||||
|
||||
import psutil
|
||||
|
||||
if is_colab():
|
||||
shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory
|
||||
|
||||
# System info
|
||||
display = None
|
||||
if verbose:
|
||||
gb = 1 << 30 # bytes to GiB (1024 ** 3)
|
||||
ram = psutil.virtual_memory().total
|
||||
total, used, free = shutil.disk_usage('/')
|
||||
with contextlib.suppress(Exception): # clear display if ipython is installed
|
||||
from IPython import display
|
||||
display.clear_output()
|
||||
s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
|
||||
else:
|
||||
s = ''
|
||||
|
||||
select_device(newline=False)
|
||||
print(emojis(f'Setup complete ✅ {s}'))
|
||||
return display
|
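A short usage sketch for the TryExcept and threaded helpers above; the wrapped functions are invented for the example, and it assumes the script is run from the api directory so that `utils` is importable.

```python
# Illustrative only: exercising TryExcept and threaded from utils/__init__.py.
import time

from utils import TryExcept, threaded


@TryExcept('download failed')
def flaky_download():
    raise RuntimeError('network unreachable')  # caught by TryExcept and printed, not raised


@threaded
def background_job(seconds):
    time.sleep(seconds)  # runs in a daemon thread; the caller receives the Thread object
    print('background job done')


flaky_download()        # prints: download failed: network unreachable
t = background_job(1)   # returns immediately with the started thread
t.join()                # wait for it, since daemon threads die with the main thread
```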
103
ayf_ocr/yolo_databases/api/utils/activations.py
Normal file
@ -0,0 +1,103 @@
|
||||
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
|
||||
"""
|
||||
Activation functions
|
||||
"""
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
class SiLU(nn.Module):
|
||||
# SiLU activation https://arxiv.org/pdf/1606.08415.pdf
|
||||
@staticmethod
|
||||
def forward(x):
|
||||
return x * torch.sigmoid(x)
|
||||
|
||||
|
||||
class Hardswish(nn.Module):
|
||||
# Hard-SiLU activation
|
||||
@staticmethod
|
||||
def forward(x):
|
||||
# return x * F.hardsigmoid(x) # for TorchScript and CoreML
|
||||
return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX
|
||||
|
||||
|
||||
class Mish(nn.Module):
|
||||
# Mish activation https://github.com/digantamisra98/Mish
|
||||
@staticmethod
|
||||
def forward(x):
|
||||
return x * F.softplus(x).tanh()
|
||||
|
||||
|
||||
class MemoryEfficientMish(nn.Module):
|
||||
# Mish activation memory-efficient
|
||||
class F(torch.autograd.Function):
|
||||
|
||||
@staticmethod
|
||||
def forward(ctx, x):
|
||||
ctx.save_for_backward(x)
|
||||
return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
|
||||
|
||||
@staticmethod
|
||||
def backward(ctx, grad_output):
|
||||
x = ctx.saved_tensors[0]
|
||||
sx = torch.sigmoid(x)
|
||||
fx = F.softplus(x).tanh()
|
||||
return grad_output * (fx + x * sx * (1 - fx * fx))
|
||||
|
||||
def forward(self, x):
|
||||
return self.F.apply(x)
|
||||
|
||||
|
||||
class FReLU(nn.Module):
|
||||
# FReLU activation https://arxiv.org/abs/2007.11824
|
||||
def __init__(self, c1, k=3): # ch_in, kernel
|
||||
super().__init__()
|
||||
self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
|
||||
self.bn = nn.BatchNorm2d(c1)
|
||||
|
||||
def forward(self, x):
|
||||
return torch.max(x, self.bn(self.conv(x)))
|
||||
|
||||
|
||||
class AconC(nn.Module):
|
||||
r""" ACON activation (activate or not)
|
||||
AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
|
||||
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
|
||||
"""
|
||||
|
||||
def __init__(self, c1):
|
||||
super().__init__()
|
||||
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
|
||||
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
|
||||
self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
|
||||
|
||||
def forward(self, x):
|
||||
dpx = (self.p1 - self.p2) * x
|
||||
return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
|
||||
|
||||
|
||||
class MetaAconC(nn.Module):
|
||||
r""" ACON activation (activate or not)
|
||||
MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
|
||||
according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
|
||||
"""
|
||||
|
||||
def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
|
||||
super().__init__()
|
||||
c2 = max(r, c1 // r)
|
||||
self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
|
||||
self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
|
||||
self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
|
||||
self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
|
||||
# self.bn1 = nn.BatchNorm2d(c2)
|
||||
# self.bn2 = nn.BatchNorm2d(c1)
|
||||
|
||||
def forward(self, x):
|
||||
y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
|
||||
# batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
|
||||
# beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
|
||||
beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
|
||||
dpx = (self.p1 - self.p2) * x
|
||||
return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
|
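Finally, a small smoke test for the activation modules above; the shapes and channel counts are arbitrary and chosen only for the example, and it assumes `utils.activations` is importable from the working directory.

```python
# Arbitrary smoke test for the activation modules defined above.
import torch

from utils.activations import SiLU, FReLU, MetaAconC

x = torch.randn(2, 64, 32, 32)  # (batch, channels, height, width)

print(SiLU()(x).shape)            # element-wise, shape preserved: torch.Size([2, 64, 32, 32])
print(FReLU(c1=64)(x).shape)      # depth-wise conv branch via torch.max, shape preserved
print(MetaAconC(c1=64)(x).shape)  # learnable "activate or not", shape preserved
```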