
utils.py

  1. import glob
  2. import math
  3. import os
  4. import random
  5. import shutil
  6. import subprocess
  7. import time
  8. from copy import copy
  9. from pathlib import Path
  10. from sys import platform
  11. import cv2
  12. import matplotlib
  13. import matplotlib.pyplot as plt
  14. import numpy as np
  15. import torch
  16. import torch.nn as nn
  17. import torchvision
  18. import yaml
  19. from scipy.signal import butter, filtfilt
  20. import itertools
  21. from tqdm import tqdm
  22. from . import torch_utils, google_utils #  torch_utils, google_utils
  23. # Set printoptions
  24. torch.set_printoptions(linewidth=320, precision=5, profile='long')
  25. np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
  26. matplotlib.rc('font', **{'size': 11})
  27. # Prevent OpenCV from multithreading (to use PyTorch DataLoader)
  28. cv2.setNumThreads(0)
  29. def init_seeds(seed=0):
  30. random.seed(seed)
  31. np.random.seed(seed)
  32. torch_utils.init_seeds(seed=seed)
  33. def check_git_status():
  34. # Suggest 'git pull' if repo is out of date
  35. if platform in ['linux', 'darwin']:
  36. s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
  37. if 'Your branch is behind' in s:
  38. print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
  39. def check_img_size(img_size, s=32):
  40. # Verify img_size is a multiple of stride s
  41. new_size = make_divisible(img_size, s) # ceil gs-multiple
  42. if new_size != img_size:
  43. print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
  44. return new_size
  45. def check_anchors(dataset, model, thr=4.0, imgsz=640):
  46. # Check anchor fit to data, recompute if necessary
  47. print('\nAnalyzing anchors... ', end='')
  48. m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
  49. shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
  50. wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])).float() # wh
  51. def metric(k): # compute metric
  52. r = wh[:, None] / k[None]
  53. x = torch.min(r, 1. / r).min(2)[0] # ratio metric
  54. best = x.max(1)[0] # best_x
  55. return (best > 1. / thr).float().mean() #  best possible recall
  56. bpr = metric(m.anchor_grid.clone().cpu().view(-1, 2))
  57. print('Best Possible Recall (BPR) = %.4f' % bpr, end='')
  58. if bpr < 0.99: # threshold to recompute
  59. print('. Attempting to generate improved anchors, please wait...')
  60. na = m.anchor_grid.numel() // 2 # number of anchors
  61. new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
  62. new_bpr = metric(new_anchors.reshape(-1, 2))
  63. if new_bpr > bpr: # replace anchors
  64. new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
  65. m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
  66. m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
  67. print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
  68. else:
  69. print('Original anchors better than new anchors. Proceeding with original anchors.')
  70. print('') # newline
  71. def check_file(file):
  72. # Searches for file if not found locally
  73. if os.path.isfile(file):
  74. return file
  75. else:
  76. files = glob.glob('./**/' + file, recursive=True) # find file
  77. assert len(files), 'File Not Found: %s' % file # assert file was found
  78. return files[0] # return first file if multiple found
  79. def make_divisible(x, divisor):
  80. # Returns x evenly divisible by divisor
  81. return math.ceil(x / divisor) * divisor
  82. def labels_to_class_weights(labels, nc=80):
  83. # Get class weights (inverse frequency) from training labels
  84. if labels[0] is None: # no labels loaded
  85. return torch.Tensor()
  86. labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
  87. classes = labels[:, 0].astype(np.int) # labels = [class xywh]
  88. weights = np.bincount(classes, minlength=nc) # occurrences per class
  89. # Prepend gridpoint count (for uCE training)
  90. # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
  91. # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
  92. weights[weights == 0] = 1 # replace empty bins with 1
  93. weights = 1 / weights # number of targets per class
  94. weights /= weights.sum() # normalize
  95. return torch.from_numpy(weights)
  96. def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
  97. # Produces image weights based on class mAPs
  98. n = len(labels)
  99. class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])
  100. image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
  101. # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
  102. return image_weights
  103. def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
  104. # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
  105. # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
  106. # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
  107. # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
  108. # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
  109. x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
  110. 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
  111. 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
  112. return x
  113. def xyxy2xywh(x):
  114. # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
  115. y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
  116. y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
  117. y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
  118. y[:, 2] = x[:, 2] - x[:, 0] # width
  119. y[:, 3] = x[:, 3] - x[:, 1] # height
  120. return y
  121. def xywh2xyxy(x):
  122. # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
  123. y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
  124. y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
  125. y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
  126. y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
  127. y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
  128. return y
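# Illustrative sketch (added for clarity; not part of the original utils.py): a quick
# round-trip check of the two conversions above. A box with corners (10, 20) and (50, 80)
# has center (30, 50), width 40 and height 60, and converting back recovers the corners.
def _demo_box_conversions():
    boxes_xyxy = np.array([[10., 20., 50., 80.]])  # nx4 [x1, y1, x2, y2]
    boxes_xywh = xyxy2xywh(boxes_xyxy)             # -> [[30., 50., 40., 60.]]
    assert np.allclose(xywh2xyxy(boxes_xywh), boxes_xyxy)
    return boxes_xywh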
  129. def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
  130. # Rescale coords (xyxy) from img1_shape to img0_shape
  131. if ratio_pad is None: # calculate from img0_shape
  132. gain = max(img1_shape) / max(img0_shape) # gain = old / new
  133. pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
  134. else:
  135. gain = ratio_pad[0][0]
  136. pad = ratio_pad[1]
  137. coords[:, [0, 2]] -= pad[0] # x padding
  138. coords[:, [1, 3]] -= pad[1] # y padding
  139. coords[:, :4] /= gain
  140. clip_coords(coords, img0_shape)
  141. return coords
  142. def clip_coords(boxes, img_shape):
  143. # Clip xyxy bounding boxes to image shape (height, width)
  144. boxes[:, 0].clamp_(0, img_shape[1]) # x1
  145. boxes[:, 1].clamp_(0, img_shape[0]) # y1
  146. boxes[:, 2].clamp_(0, img_shape[1]) # x2
  147. boxes[:, 3].clamp_(0, img_shape[0]) # y2
  148. def ap_per_class(tp, conf, pred_cls, target_cls):
  149. """ Compute the average precision, given the recall and precision curves.
  150. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
  151. # Arguments
  152. tp: True positives (nparray, nx1 or nx10).
  153. conf: Objectness value from 0-1 (nparray).
  154. pred_cls: Predicted object classes (nparray).
  155. target_cls: True object classes (nparray).
  156. # Returns
  157. The average precision as computed in py-faster-rcnn.
  158. """
  159. # Sort by objectness
  160. i = np.argsort(-conf)
  161. tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
  162. # Find unique classes
  163. unique_classes = np.unique(target_cls)
  164. # Create Precision-Recall curve and compute AP for each class
  165. pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
  166. s = [unique_classes.shape[0], tp.shape[1]] # number of classes, number of iou thresholds (i.e. 10 for mAP0.5...0.95)
  167. ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
  168. for ci, c in enumerate(unique_classes):
  169. i = pred_cls == c
  170. n_gt = (target_cls == c).sum() # Number of ground truth objects
  171. n_p = i.sum() # Number of predicted objects
  172. if n_p == 0 or n_gt == 0:
  173. continue
  174. else:
  175. # Accumulate FPs and TPs
  176. fpc = (1 - tp[i]).cumsum(0)
  177. tpc = tp[i].cumsum(0)
  178. # Recall
  179. recall = tpc / (n_gt + 1e-16) # recall curve
  180. r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
  181. # Precision
  182. precision = tpc / (tpc + fpc) # precision curve
  183. p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
  184. # AP from recall-precision curve
  185. for j in range(tp.shape[1]):
  186. ap[ci, j] = compute_ap(recall[:, j], precision[:, j])
  187. # Plot
  188. # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
  189. # ax.plot(recall, precision)
  190. # ax.set_xlabel('Recall')
  191. # ax.set_ylabel('Precision')
  192. # ax.set_xlim(0, 1.01)
  193. # ax.set_ylim(0, 1.01)
  194. # fig.tight_layout()
  195. # fig.savefig('PR_curve.png', dpi=300)
  196. # Compute F1 score (harmonic mean of precision and recall)
  197. f1 = 2 * p * r / (p + r + 1e-16)
  198. return p, r, ap, f1, unique_classes.astype('int32')
  199. def compute_ap(recall, precision):
  200. """ Compute the average precision, given the recall and precision curves.
  201. Source: https://github.com/rbgirshick/py-faster-rcnn.
  202. # Arguments
  203. recall: The recall curve (list).
  204. precision: The precision curve (list).
  205. # Returns
  206. The average precision as computed in py-faster-rcnn.
  207. """
  208. # Append sentinel values to beginning and end
  209. mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
  210. mpre = np.concatenate(([0.], precision, [0.]))
  211. # Compute the precision envelope
  212. mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
  213. # Integrate area under curve
  214. method = 'interp' # methods: 'continuous', 'interp'
  215. if method == 'interp':
  216. x = np.linspace(0, 1, 101) # 101-point interp (COCO)
  217. ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
  218. else: # 'continuous'
  219. i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
  220. ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
  221. return ap
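# Illustrative sketch (added for clarity; not part of the original utils.py): a worked
# example of compute_ap() on a tiny hand-made precision-recall curve. The three points
# (R, P) = (0.2, 1.0), (0.4, 0.8), (0.6, 0.6) yield an interpolated AP of roughly 0.52.
def _demo_compute_ap():
    recall = np.array([0.2, 0.4, 0.6])
    precision = np.array([1.0, 0.8, 0.6])
    return compute_ap(recall, precision)  # area under the 101-point interpolated envelope, ~0.52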
  222. def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
  223. # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
  224. box2 = box2.t()
  225. # Get the coordinates of bounding boxes
  226. if x1y1x2y2: # x1, y1, x2, y2 = box1
  227. b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
  228. b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
  229. else: # transform from xywh to xyxy
  230. b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
  231. b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
  232. b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
  233. b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
  234. # Intersection area
  235. inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
  236. (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
  237. # Union Area
  238. w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
  239. w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
  240. union = (w1 * h1 + 1e-16) + w2 * h2 - inter
  241. iou = inter / union # iou
  242. if GIoU or DIoU or CIoU:
  243. cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
  244. ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
  245. if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
  246. c_area = cw * ch + 1e-16 # convex area
  247. return iou - (c_area - union) / c_area # GIoU
  248. if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
  249. # convex diagonal squared
  250. c2 = cw ** 2 + ch ** 2 + 1e-16
  251. # centerpoint distance squared
  252. rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
  253. if DIoU:
  254. return iou - rho2 / c2 # DIoU
  255. elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
  256. v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
  257. with torch.no_grad():
  258. alpha = v / (1 - iou + v)
  259. return iou - (rho2 / c2 + v * alpha) # CIoU
  260. return iou
  261. def box_iou(box1, box2):
  262. # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
  263. """
  264. Return intersection-over-union (Jaccard index) of boxes.
  265. Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
  266. Arguments:
  267. box1 (Tensor[N, 4])
  268. box2 (Tensor[M, 4])
  269. Returns:
  270. iou (Tensor[N, M]): the NxM matrix containing the pairwise
  271. IoU values for every element in boxes1 and boxes2
  272. """
  273. def box_area(box):
  274. # box = 4xn
  275. return (box[2] - box[0]) * (box[3] - box[1])
  276. area1 = box_area(box1.t())
  277. area2 = box_area(box2.t())
  278. # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
  279. inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
  280. return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
  281. def wh_iou(wh1, wh2):
  282. # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
  283. wh1 = wh1[:, None] # [N,1,2]
  284. wh2 = wh2[None] # [1,M,2]
  285. inter = torch.min(wh1, wh2).prod(2) # [N,M]
  286. return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
  287. class FocalLoss(nn.Module):
  288. # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
  289. def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
  290. super(FocalLoss, self).__init__()
  291. self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
  292. self.gamma = gamma
  293. self.alpha = alpha
  294. self.reduction = loss_fcn.reduction
  295. self.loss_fcn.reduction = 'none' # required to apply FL to each element
  296. def forward(self, pred, true):
  297. loss = self.loss_fcn(pred, true)
  298. # p_t = torch.exp(-loss)
  299. # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
  300. # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
  301. pred_prob = torch.sigmoid(pred) # prob from logits
  302. p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
  303. alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
  304. modulating_factor = (1.0 - p_t) ** self.gamma
  305. loss *= alpha_factor * modulating_factor
  306. if self.reduction == 'mean':
  307. return loss.mean()
  308. elif self.reduction == 'sum':
  309. return loss.sum()
  310. else: # 'none'
  311. return loss
  312. def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
  313. # return positive, negative label smoothing BCE targets
  314. return 1.0 - 0.5 * eps, 0.5 * eps
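# Illustrative note (added for clarity; not part of the original utils.py): with eps=0.1,
# smooth_BCE() returns cp=0.95 for positive targets and cn=0.05 for negative targets;
# compute_loss() below fills the class-target matrix with cn and writes cp at the true class index.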
  315. class BCEBlurWithLogitsLoss(nn.Module):
  316. # BCEWithLogitsLoss() with reduced missing-label effects.
  317. def __init__(self, alpha=0.05):
  318. super(BCEBlurWithLogitsLoss, self).__init__()
  319. self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
  320. self.alpha = alpha
  321. def forward(self, pred, true):
  322. loss = self.loss_fcn(pred, true)
  323. pred = torch.sigmoid(pred) # prob from logits
  324. dx = pred - true # reduce only missing label effects
  325. # dx = (pred - true).abs() # reduce missing label and false label effects
  326. alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
  327. loss *= alpha_factor
  328. return loss.mean()
  329. def compute_loss(p, targets, model): # predictions, targets, model
  330. ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
  331. lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
  332. tcls, tbox, indices, anchors = build_targets(p, targets, model) # targets
  333. h = model.hyp # hyperparameters
  334. red = 'mean' # Loss reduction (sum or mean)
  335. # Define criteria
  336. BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
  337. BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)
  338. # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
  339. cp, cn = smooth_BCE(eps=0.0)
  340. # focal loss
  341. g = h['fl_gamma'] # focal loss gamma
  342. if g > 0:
  343. BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
  344. # per output
  345. nt = 0 # targets
  346. for i, pi in enumerate(p): # layer index, layer predictions
  347. b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
  348. tobj = torch.zeros_like(pi[..., 0]) # target obj
  349. nb = b.shape[0] # number of targets
  350. if nb:
  351. nt += nb # cumulative targets
  352. ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
  353. # GIoU
  354. pxy = ps[:, :2].sigmoid() * 2. - 0.5
  355. pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
  356. pbox = torch.cat((pxy, pwh), 1) # predicted box
  357. giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True) # giou(prediction, target)
  358. lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean() # giou loss
  359. # Obj
  360. tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype) # giou ratio
  361. # Class
  362. if model.nc > 1: # cls loss (only if multiple classes)
  363. t = torch.full_like(ps[:, 5:], cn) # targets
  364. t[range(nb), tcls[i]] = cp
  365. lcls += BCEcls(ps[:, 5:], t) # BCE
  366. # Append targets to text file
  367. # with open('targets.txt', 'a') as file:
  368. # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
  369. lobj += BCEobj(pi[..., 4], tobj) # obj loss
  370. lbox *= h['giou']
  371. lobj *= h['obj']
  372. lcls *= h['cls']
  373. bs = tobj.shape[0] # batch size
  374. if red == 'sum':
  375. g = 3.0 # loss gain
  376. lobj *= g / bs
  377. if nt:
  378. lcls *= g / nt / model.nc
  379. lbox *= g / nt
  380. loss = lbox + lobj + lcls
  381. return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
  382. def build_targets(p, targets, model):
  383. # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
  384. det = model.module.model[-1] if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) \
  385. else model.model[-1] # Detect() module
  386. na, nt = det.na, targets.shape[0] # number of anchors, targets
  387. tcls, tbox, indices, anch = [], [], [], []
  388. gain = torch.ones(6, device=targets.device) # normalized to gridspace gain
  389. off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float() # overlap offsets
  390. at = torch.arange(na).view(na, 1).repeat(1, nt) # anchor tensor, same as .repeat_interleave(nt)
  391. style = 'rect4'
  392. for i in range(det.nl):
  393. anchors = det.anchors[i]
  394. gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
  395. # Match targets to anchors
  396. a, t, offsets = [], targets * gain, 0
  397. if nt:
  398. r = t[None, :, 4:6] / anchors[:, None] # wh ratio
  399. j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare
  400. # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
  401. a, t = at[j], t.repeat(na, 1, 1)[j] # filter
  402. # overlaps
  403. gxy = t[:, 2:4] # grid xy
  404. z = torch.zeros_like(gxy)
  405. if style == 'rect2':
  406. g = 0.2 # offset
  407. j, k = ((gxy % 1. < g) & (gxy > 1.)).T
  408. a, t = torch.cat((a, a[j], a[k]), 0), torch.cat((t, t[j], t[k]), 0)
  409. offsets = torch.cat((z, z[j] + off[0], z[k] + off[1]), 0) * g
  410. elif style == 'rect4':
  411. g = 0.5 # offset
  412. j, k = ((gxy % 1. < g) & (gxy > 1.)).T
  413. l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
  414. a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
  415. offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g
  416. # Define
  417. b, c = t[:, :2].long().T # image, class
  418. gxy = t[:, 2:4] # grid xy
  419. gwh = t[:, 4:6] # grid wh
  420. gij = (gxy - offsets).long()
  421. gi, gj = gij.T # grid xy indices
  422. # Append
  423. indices.append((b, a, gj, gi)) # image, anchor, grid indices
  424. tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
  425. anch.append(anchors[a]) # anchors
  426. tcls.append(c) # class
  427. return tcls, tbox, indices, anch
  428. def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, fast=False, classes=None, agnostic=False):
  429. """Performs Non-Maximum Suppression (NMS) on inference results
  430. Returns:
  431. detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
  432. """
  433. if prediction.dtype is torch.float16:
  434. prediction = prediction.float() # to FP32
  435. nc = prediction[0].shape[1] - 5 # number of classes
  436. xc = prediction[..., 4] > conf_thres # candidates
  437. # Settings
  438. min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
  439. max_det = 300 # maximum number of detections per image
  440. time_limit = 10.0 # seconds to quit after
  441. redundant = True # require redundant detections
  442. fast |= conf_thres > 0.001 # fast mode
  443. multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
  444. if fast:
  445. merge = False
  446. else:
  447. merge = True # merge for best mAP (adds 0.5ms/img)
  448. t = time.time()
  449. output = [None] * prediction.shape[0]
  450. for xi, x in enumerate(prediction): # image index, image inference
  451. # Apply constraints
  452. # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
  453. x = x[xc[xi]] # confidence
  454. # If none remain process next image
  455. if not x.shape[0]:
  456. continue
  457. # Compute conf
  458. x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
  459. # Box (center x, center y, width, height) to (x1, y1, x2, y2)
  460. box = xywh2xyxy(x[:, :4])
  461. # Detections matrix nx6 (xyxy, conf, cls)
  462. if multi_label:
  463. i, j = (x[:, 5:] > conf_thres).nonzero().t()
  464. x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
  465. else: # best class only
  466. conf, j = x[:, 5:].max(1, keepdim=True)
  467. x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
  468. # Filter by class
  469. if classes:
  470. x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
  471. # Apply finite constraint
  472. # if not torch.isfinite(x).all():
  473. # x = x[torch.isfinite(x).all(1)]
  474. # If none remain process next image
  475. n = x.shape[0] # number of boxes
  476. if not n:
  477. continue
  478. # Sort by confidence
  479. # x = x[x[:, 4].argsort(descending=True)]
  480. # Batched NMS
  481. c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
  482. boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
  483. i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
  484. if i.shape[0] > max_det: # limit detections
  485. i = i[:max_det]
  486. if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
  487. try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
  488. iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
  489. weights = iou * scores[None] # box weights
  490. x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
  491. if redundant:
  492. i = i[iou.sum(1) > 1] # require redundancy
  493. except: # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
  494. print(x, i, x.shape, i.shape)
  495. pass
  496. output[xi] = x[i]
  497. if (time.time() - t) > time_limit:
  498. break # time limit exceeded
  499. return output
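# Illustrative usage sketch (added for clarity; not part of the original utils.py):
# non_max_suppression() expects raw predictions of shape (batch, n_boxes, 5 + n_classes),
# i.e. xywh box, objectness and per-class scores, and returns one nx6 tensor (or None) per image.
# The random input below is only a placeholder for real model output.
def _demo_nms():
    pred = torch.rand(1, 100, 85)  # fake batch: 100 candidate boxes, 80 classes (assumed)
    pred[..., :4] *= 640           # put xywh into pixel space
    out = non_max_suppression(pred, conf_thres=0.4, iou_thres=0.5)
    return out[0]                  # detections for image 0: x1, y1, x2, y2, conf, cls (or None)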
  500. def strip_optimizer(f='weights/best.pt'): # from utils.utils import *; strip_optimizer()
  501. # Strip optimizer from *.pt files for lighter files (reduced by 1/2 size)
  502. x = torch.load(f, map_location=torch.device('cpu'))
  503. x['optimizer'] = None
  504. x['model'].half() # to FP16
  505. torch.save(x, f)
  506. print('Optimizer stripped from %s' % f)
  507. def create_backbone(f='weights/best.pt', s='weights/backbone.pt'): # from utils.utils import *; create_backbone()
  508. # create backbone 's' from 'f'
  509. device = torch.device('cpu')
  510. x = torch.load(f, map_location=device)
  511. x['optimizer'] = None
  512. x['training_results'] = None
  513. x['epoch'] = -1
  514. x['model'].half() # to FP16
  515. for p in x['model'].parameters():
  516. p.requires_grad = True
  517. torch.save(x, s)
  518. print('%s modified for backbone use and saved as %s' % (f, s))
  519. def coco_class_count(path='../coco/labels/train2014/'):
  520. # Histogram of occurrences per class
  521. nc = 80 # number classes
  522. x = np.zeros(nc, dtype='int32')
  523. files = sorted(glob.glob('%s/*.*' % path))
  524. for i, file in enumerate(files):
  525. labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
  526. x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
  527. print(i, len(files))
  528. def coco_only_people(path='../coco/labels/train2017/'): # from utils.utils import *; coco_only_people()
  529. # Find images with only people
  530. files = sorted(glob.glob('%s/*.*' % path))
  531. for i, file in enumerate(files):
  532. labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
  533. if all(labels[:, 0] == 0):
  534. print(labels.shape[0], file)
  535. def crop_images_random(path='../images/', scale=0.50): # from utils.utils import *; crop_images_random()
  536. # crops images into random squares up to scale fraction
  537. # WARNING: overwrites images!
  538. for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
  539. img = cv2.imread(file) # BGR
  540. if img is not None:
  541. h, w = img.shape[:2]
  542. # create random mask
  543. a = 30 # minimum size (pixels)
  544. mask_h = random.randint(a, int(max(a, h * scale))) # mask height
  545. mask_w = mask_h # mask width
  546. # box
  547. xmin = max(0, random.randint(0, w) - mask_w // 2)
  548. ymin = max(0, random.randint(0, h) - mask_h // 2)
  549. xmax = min(w, xmin + mask_w)
  550. ymax = min(h, ymin + mask_h)
  551. # crop and overwrite the original image
  552. cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
  553. def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
  554. # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
  555. if os.path.exists('new/'):
  556. shutil.rmtree('new/') # delete output folder
  557. os.makedirs('new/') # make new output folder
  558. os.makedirs('new/labels/')
  559. os.makedirs('new/images/')
  560. for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
  561. with open(file, 'r') as f:
  562. labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
  563. i = labels[:, 0] == label_class
  564. if any(i):
  565. img_file = file.replace('labels', 'images').replace('txt', 'jpg')
  566. labels[:, 0] = 0 # reset class to 0
  567. with open('new/images.txt', 'a') as f: # add image to dataset list
  568. f.write(img_file + '\n')
  569. with open('new/labels/' + Path(file).name, 'a') as f: # write label
  570. for l in labels[i]:
  571. f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
  572. shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg')) # copy images
  573. def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
  574. """ Creates kmeans-evolved anchors from training dataset
  575. Arguments:
  576. path: path to dataset *.yaml, or a loaded dataset
  577. n: number of anchors
  578. img_size: image size used for training
  579. thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
  580. gen: generations to evolve anchors using genetic algorithm
  581. Return:
  582. k: kmeans evolved anchors
  583. Usage:
  584. from utils.utils import *; _ = kmean_anchors()
  585. """
  586. thr = 1. / thr
  587. def metric(k, wh): # compute metrics
  588. r = wh[:, None] / k[None]
  589. x = torch.min(r, 1. / r).min(2)[0] # ratio metric
  590. # x = wh_iou(wh, torch.tensor(k)) # iou metric
  591. return x, x.max(1)[0] # x, best_x
  592. def fitness(k): # mutation fitness
  593. _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
  594. return (best * (best > thr).float()).mean() # fitness
  595. def print_results(k):
  596. k = k[np.argsort(k.prod(1))] # sort small to large
  597. x, best = metric(k, wh0)
  598. bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
  599. print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
  600. print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
  601. (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
  602. for i, x in enumerate(k):
  603. print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
  604. return k
  605. if isinstance(path, str): # *.yaml file
  606. with open(path) as f:
  607. data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
  608. from utils.datasets import LoadImagesAndLabels
  609. dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
  610. else:
  611. dataset = path # dataset
  612. # Get label wh
  613. shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
  614. wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
  615. # Filter
  616. i = (wh0 < 4.0).any(1).sum()
  617. if i:
  618. print('WARNING: Extremely small objects found. '
  619. '%g of %g labels are < 4 pixels in width or height.' % (i, len(wh0)))
  620. wh = wh0[(wh0 >= 4.0).any(1)] # keep labels >= 4 pixels
  621. # Kmeans calculation
  622. from scipy.cluster.vq import kmeans
  623. print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
  624. s = wh.std(0) # sigmas for whitening
  625. k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
  626. k *= s
  627. wh = torch.tensor(wh, dtype=torch.float32) # filtered
  628. wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
  629. k = print_results(k)
  630. # Plot
  631. # k, d = [None] * 20, [None] * 20
  632. # for i in tqdm(range(1, 21)):
  633. # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
  634. # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
  635. # ax = ax.ravel()
  636. # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
  637. # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
  638. # ax[0].hist(wh[wh[:, 0]<100, 0],400)
  639. # ax[1].hist(wh[wh[:, 1]<100, 1],400)
  640. # fig.tight_layout()
  641. # fig.savefig('wh.png', dpi=200)
  642. # Evolve
  643. npr = np.random
  644. f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1 # fitness, shape, mutation prob, sigma
  645. pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar
  646. for _ in pbar:
  647. v = np.ones(sh)
  648. while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
  649. v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
  650. kg = (k.copy() * v).clip(min=2.0)
  651. fg = fitness(kg)
  652. if fg > f:
  653. f, k = fg, kg.copy()
  654. pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
  655. if verbose:
  656. print_results(k)
  657. return print_results(k)
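# Illustrative usage sketch (added for clarity; not part of the original utils.py):
# evolving 9 anchors for 640-pixel training images from a dataset *.yaml. The path and
# hyperparameters below are the function's own defaults, and the dataset must exist locally.
def _demo_kmean_anchors():
    k = kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000)
    return k  # (9, 2) array of anchor widths and heights, sorted small to large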
  658. def print_mutation(hyp, results, bucket=''):
  659. # Print mutation results to evolve.txt (for use with train.py --evolve)
  660. a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
  661. b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
  662. c = '%10.4g' * len(results) % results # results (P, R, mAP, F1, test_loss)
  663. print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
  664. if bucket:
  665. os.system('gsutil cp gs://%s/evolve.txt .' % bucket) # download evolve.txt
  666. with open('evolve.txt', 'a') as f: # append result
  667. f.write(c + b + '\n')
  668. x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
  669. np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g') # save sort by fitness
  670. if bucket:
  671. os.system('gsutil cp evolve.txt gs://%s' % bucket) # upload evolve.txt
  672. def apply_classifier(x, model, img, im0):
  673. # applies a second stage classifier to yolo outputs
  674. im0 = [im0] if isinstance(im0, np.ndarray) else im0
  675. for i, d in enumerate(x): # per image
  676. if d is not None and len(d):
  677. d = d.clone()
  678. # Reshape and pad cutouts
  679. b = xyxy2xywh(d[:, :4]) # boxes
  680. b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
  681. b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
  682. d[:, :4] = xywh2xyxy(b).long()
  683. # Rescale boxes from img_size to im0 size
  684. scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
  685. # Classes
  686. pred_cls1 = d[:, 5].long()
  687. ims = []
  688. for j, a in enumerate(d): # per item
  689. cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
  690. im = cv2.resize(cutout, (224, 224)) # BGR
  691. # cv2.imwrite('test%i.jpg' % j, cutout)
  692. im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x224x224
  693. im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
  694. im /= 255.0 # 0 - 255 to 0.0 - 1.0
  695. ims.append(im)
  696. pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
  697. x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
  698. return x
  699. def fitness(x):
  700. # Returns fitness (for use with results.txt or evolve.txt)
  701. w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
  702. return (x[:, :4] * w).sum(1)
  703. def output_to_target(output, width, height):
  704. """
  705. Convert a YOLO model output to target format
  706. [batch_id, class_id, x, y, w, h, conf]
  707. """
  708. if isinstance(output, torch.Tensor):
  709. output = output.cpu().numpy()
  710. targets = []
  711. for i, o in enumerate(output):
  712. if o is not None:
  713. for pred in o:
  714. box = pred[:4]
  715. w = (box[2] - box[0]) / width
  716. h = (box[3] - box[1]) / height
  717. x = box[0] / width + w / 2
  718. y = box[1] / height + h / 2
  719. conf = pred[4]
  720. cls = int(pred[5])
  721. targets.append([i, cls, x, y, w, h, conf])
  722. return np.array(targets)
  723. # Plotting functions ---------------------------------------------------------------------------------------------------
  724. def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
  725. # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
  726. def butter_lowpass(cutoff, fs, order):
  727. nyq = 0.5 * fs
  728. normal_cutoff = cutoff / nyq
  729. b, a = butter(order, normal_cutoff, btype='low', analog=False)
  730. return b, a
  731. b, a = butter_lowpass(cutoff, fs, order=order)
  732. return filtfilt(b, a, data) # forward-backward filter
  733. def plot_one_box(x, img, color=None, label=None, line_thickness=None):
  734. # Plots one bounding box on image img
  735. tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
  736. color = [255, 0, 0]
  737. c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
  738. cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
  739. if label:
  740. tf = max(tl - 1, 1) # font thickness
  741. t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
  742. c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
  743. cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
  744. cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
  745. def plot_dots_on_people(x, img):
  746. # Plot the center of a person as a green dot
  747. thickness = -1 # -1 thickness fills the circle
  748. color = [0, 255, 0] # green
  749. center = ((int(x[2])+int(x[0]))//2,(int(x[3])+int(x[1]))//2)
  750. radius = 10
  751. cv2.circle(img, center, radius, color, thickness)
  752. def distancing(people_coords, img, dist_thres_lim=(200,250)):
  753. # Plot lines connecting people
  754. already_red = dict() # dictionary to store if a plotted rectangle has already been labelled as high risk
  755. centers = []
  756. for i in people_coords:
  757. centers.append(((int(i[2])+int(i[0]))//2,(int(i[3])+int(i[1]))//2))
  758. for j in centers:
  759. already_red[j] = 0
  760. x_combs = list(itertools.combinations(people_coords,2))
  761. radius = 10
  762. thickness = 5
  763. for x in x_combs:
  764. xyxy1, xyxy2 = x[0],x[1]
  765. cntr1 = ((int(xyxy1[2])+int(xyxy1[0]))//2,(int(xyxy1[3])+int(xyxy1[1]))//2)
  766. cntr2 = ((int(xyxy2[2])+int(xyxy2[0]))//2,(int(xyxy2[3])+int(xyxy2[1]))//2)
  767. dist = ((cntr2[0]-cntr1[0])**2 + (cntr2[1]-cntr1[1])**2)**0.5
  768. #print("--------------- distance: {0}".format(dist))
  769. if dist > dist_thres_lim[0] and dist < dist_thres_lim[1]:
  770. color = (0, 255, 255)
  771. label = "Low Risk "
  772. cv2.line(img, cntr1, cntr2, color, thickness)
  773. if already_red[cntr1] == 0:
  774. cv2.circle(img, cntr1, radius, color, -1)
  775. if already_red[cntr2] == 0:
  776. cv2.circle(img, cntr2, radius, color, -1)
  777. # Plots one bounding box on image img
  778. tl = round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
  779. for xy in x:
  780. cntr = ((int(xy[2])+int(xy[0]))//2,(int(xy[3])+int(xy[1]))//2)
  781. if already_red[cntr] == 0:
  782. c1, c2 = (int(xy[0]), int(xy[1])), (int(xy[2]), int(xy[3]))
  783. tf = max(tl - 1, 1) # font thickness
  784. t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
  785. c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
  786. cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
  787. cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
  788. cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
  789. elif dist < dist_thres_lim[0]:
  790. color = (0, 0, 255)
  791. label = "High Risk"
  792. already_red[cntr1] = 1
  793. already_red[cntr2] = 1
  794. cv2.line(img, cntr1, cntr2, color, thickness)
  795. cv2.circle(img, cntr1, radius, color, -1)
  796. cv2.circle(img, cntr2, radius, color, -1)
  797. # Plots one bounding box on image img
  798. tl = round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
  799. for xy in x:
  800. c1, c2 = (int(xy[0]), int(xy[1])), (int(xy[2]), int(xy[3]))
  801. tf = max(tl - 1, 1) # font thickness
  802. t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
  803. c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
  804. cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
  805. cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
  806. cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
  807. def plot_wh_methods(): # from utils.utils import *; plot_wh_methods()
  808. # Compares the two methods for width-height anchor multiplication
  809. # https://github.com/ultralytics/yolov3/issues/168
  810. x = np.arange(-4.0, 4.0, .1)
  811. ya = np.exp(x)
  812. yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
  813. fig = plt.figure(figsize=(6, 3), dpi=150)
  814. plt.plot(x, ya, '.-', label='yolo method')
  815. plt.plot(x, yb ** 2, '.-', label='^2 power method')
  816. plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
  817. plt.xlim(left=-4, right=4)
  818. plt.ylim(bottom=0, top=6)
  819. plt.xlabel('input')
  820. plt.ylabel('output')
  821. plt.legend()
  822. fig.tight_layout()
  823. fig.savefig('comparison.png', dpi=200)
  824. def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
  825. tl = 3 # line thickness
  826. tf = max(tl - 1, 1) # font thickness
  827. if os.path.isfile(fname): # do not overwrite
  828. return None
  829. if isinstance(images, torch.Tensor):
  830. images = images.cpu().float().numpy()
  831. if isinstance(targets, torch.Tensor):
  832. targets = targets.cpu().numpy()
  833. # un-normalise
  834. if np.max(images[0]) <= 1:
  835. images *= 255
  836. bs, _, h, w = images.shape # batch size, _, height, width
  837. bs = min(bs, max_subplots) # limit plot images
  838. ns = np.ceil(bs ** 0.5) # number of subplots (square)
  839. # Check if we should resize
  840. scale_factor = max_size / max(h, w)
  841. if scale_factor < 1:
  842. h = math.ceil(scale_factor * h)
  843. w = math.ceil(scale_factor * w)
  844. # Empty array for output
  845. mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)
  846. # Fix class - colour map
  847. prop_cycle = plt.rcParams['axes.prop_cycle']
  848. # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
  849. hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
  850. color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]
  851. for i, img in enumerate(images):
  852. if i == max_subplots: # if last batch has fewer images than we expect
  853. break
  854. block_x = int(w * (i // ns))
  855. block_y = int(h * (i % ns))
  856. img = img.transpose(1, 2, 0)
  857. if scale_factor < 1:
  858. img = cv2.resize(img, (w, h))
  859. mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
  860. if len(targets) > 0:
  861. image_targets = targets[targets[:, 0] == i]
  862. boxes = xywh2xyxy(image_targets[:, 2:6]).T
  863. classes = image_targets[:, 1].astype('int')
  864. gt = image_targets.shape[1] == 6 # ground truth if no conf column
  865. conf = None if gt else image_targets[:, 6] # check for confidence presence (gt vs pred)
  866. boxes[[0, 2]] *= w
  867. boxes[[0, 2]] += block_x
  868. boxes[[1, 3]] *= h
  869. boxes[[1, 3]] += block_y
  870. for j, box in enumerate(boxes.T):
  871. cls = int(classes[j])
  872. color = color_lut[cls % len(color_lut)]
  873. cls = names[cls] if names else cls
  874. if gt or conf[j] > 0.3: # 0.3 conf thresh
  875. label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
  876. plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
  877. # Draw image filename labels
  878. if paths is not None:
  879. label = os.path.basename(paths[i])[:40] # trim to 40 char
  880. t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
  881. cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
  882. lineType=cv2.LINE_AA)
  883. # Image border
  884. cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
  885. if fname is not None:
  886. mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
  887. cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))
  888. return mosaic
  889. def plot_lr_scheduler(optimizer, scheduler, epochs=300):
  890. # Plot LR simulating training for full epochs
  891. optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
  892. y = []
  893. for _ in range(epochs):
  894. scheduler.step()
  895. y.append(optimizer.param_groups[0]['lr'])
  896. plt.plot(y, '.-', label='LR')
  897. plt.xlabel('epoch')
  898. plt.ylabel('LR')
  899. plt.grid()
  900. plt.xlim(0, epochs)
  901. plt.ylim(0)
  902. plt.tight_layout()
  903. plt.savefig('LR.png', dpi=200)
  904. def plot_test_txt(): # from utils.utils import *; plot_test()
  905. # Plot test.txt histograms
  906. x = np.loadtxt('test.txt', dtype=np.float32)
  907. box = xyxy2xywh(x[:, :4])
  908. cx, cy = box[:, 0], box[:, 1]
  909. fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
  910. ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
  911. ax.set_aspect('equal')
  912. plt.savefig('hist2d.png', dpi=300)
  913. fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
  914. ax[0].hist(cx, bins=600)
  915. ax[1].hist(cy, bins=600)
  916. plt.savefig('hist1d.png', dpi=200)
  917. def plot_targets_txt(): # from utils.utils import *; plot_targets_txt()
  918. # Plot targets.txt histograms
  919. x = np.loadtxt('targets.txt', dtype=np.float32).T
  920. s = ['x targets', 'y targets', 'width targets', 'height targets']
  921. fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
  922. ax = ax.ravel()
  923. for i in range(4):
  924. ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
  925. ax[i].legend()
  926. ax[i].set_title(s[i])
  927. plt.savefig('targets.jpg', dpi=200)
  928. def plot_study_txt(f='study.txt', x=None): # from utils.utils import *; plot_study_txt()
  929. # Plot study.txt generated by test.py
  930. fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
  931. ax = ax.ravel()
  932. fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
  933. for f in ['coco_study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
  934. y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
  935. x = np.arange(y.shape[1]) if x is None else np.array(x)
  936. s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
  937. for i in range(7):
  938. ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
  939. ax[i].set_title(s[i])
  940. j = y[3].argmax() + 1
  941. ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
  942. label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
  943. ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.5, 39.1, 42.5, 45.9, 49., 50.5],
  944. 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
  945. ax2.set_xlim(0, 30)
  946. ax2.set_ylim(25, 50)
  947. ax2.set_xlabel('GPU Latency (ms)')
  948. ax2.set_ylabel('COCO AP val')
  949. ax2.legend(loc='lower right')
  950. ax2.grid()
  951. plt.savefig('study_mAP_latency.png', dpi=300)
  952. plt.savefig(f.replace('.txt', '.png'), dpi=200)
  953. def plot_labels(labels):
  954. # plot dataset labels
  955. c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
  956. def hist2d(x, y, n=100):
  957. xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
  958. hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
  959. xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
  960. yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
  961. return np.log(hist[xidx, yidx])
  962. fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
  963. ax = ax.ravel()
  964. ax[0].hist(c, bins=int(c.max() + 1))
  965. ax[0].set_xlabel('classes')
  966. ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
  967. ax[1].set_xlabel('x')
  968. ax[1].set_ylabel('y')
  969. ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
  970. ax[2].set_xlabel('width')
  971. ax[2].set_ylabel('height')
  972. plt.savefig('labels.png', dpi=200)
  973. plt.close()
  974. def plot_evolution_results(hyp): # from utils.utils import *; plot_evolution_results(hyp)
  975. # Plot hyperparameter evolution results in evolve.txt
  976. x = np.loadtxt('evolve.txt', ndmin=2)
  977. f = fitness(x)
  978. # weights = (f - f.min()) ** 2 # for weighted results
  979. plt.figure(figsize=(12, 10), tight_layout=True)
  980. matplotlib.rc('font', **{'size': 8})
  981. for i, (k, v) in enumerate(hyp.items()):
  982. y = x[:, i + 7]
  983. # mu = (y * weights).sum() / weights.sum() # best weighted result
  984. mu = y[f.argmax()] # best single result
  985. plt.subplot(4, 5, i + 1)
  986. plt.plot(mu, f.max(), 'o', markersize=10)
  987. plt.plot(y, f, '.')
  988. plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
  989. print('%15s: %.3g' % (k, mu))
  990. plt.savefig('evolve.png', dpi=200)
  991. def plot_results_overlay(start=0, stop=0): # from utils.utils import *; plot_results_overlay()
  992. # Plot training 'results*.txt', overlaying train and val losses
  993. s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
  994. t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
  995. for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
  996. results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
  997. n = results.shape[1] # number of rows
  998. x = range(start, min(stop, n) if stop else n)
  999. fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
  1000. ax = ax.ravel()
  1001. for i in range(5):
  1002. for j in [i, i + 5]:
  1003. y = results[j, x]
  1004. ax[i].plot(x, y, marker='.', label=s[j])
  1005. # y_smooth = butter_lowpass_filtfilt(y)
  1006. # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
  1007. ax[i].set_title(t[i])
  1008. ax[i].legend()
  1009. ax[i].set_ylabel(f) if i == 0 else None # add filename
  1010. fig.savefig(f.replace('.txt', '.png'), dpi=200)
  1011. def plot_results(start=0, stop=0, bucket='', id=(), labels=()): # from utils.utils import *; plot_results()
  1012. # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
  1013. fig, ax = plt.subplots(2, 5, figsize=(12, 6))
  1014. ax = ax.ravel()
  1015. s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
  1016. 'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
  1017. if bucket:
  1018. os.system('rm -rf storage.googleapis.com')
  1019. files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
  1020. else:
  1021. files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
  1022. for fi, f in enumerate(files):
  1023. try:
  1024. results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
  1025. n = results.shape[1] # number of rows
  1026. x = range(start, min(stop, n) if stop else n)
  1027. for i in range(10):
  1028. y = results[i, x]
  1029. if i in [0, 1, 2, 5, 6, 7]:
  1030. y[y == 0] = np.nan # don't show zero loss values
  1031. # y /= y[0] # normalize
  1032. label = labels[fi] if len(labels) else Path(f).stem
  1033. ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
  1034. ax[i].set_title(s[i])
  1035. # if i in [5, 6, 7]: # share train and val loss y axes
  1036. # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
  1037. except:
  1038. print('Warning: Plotting error for %s, skipping file' % f)
  1039. fig.tight_layout()
  1040. ax[1].legend()
  1041. fig.savefig('results.png', dpi=200)

With the development of artificial intelligence and big data, there is a growing demand for automation tools in many fields. During the current period of epidemic prevention and control, this project uses MindSpore to implement a YOLO model for object detection and semantic segmentation. It can perform mask-wearing detection and pedestrian social-distancing detection on both video and images, enabling automated epidemic-prevention management of public places.
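The utilities above can be combined into a single inference step for the social-distancing use case described here. The sketch below is illustrative only: `model`, `img`, `im0`, the person class index 0 and the confidence/IoU thresholds are assumptions, not values fixed by this repository.

from utils.utils import non_max_suppression, scale_coords, plot_one_box, distancing
import torch

def detect_and_mark_distance(model, img, im0):
    # img: preprocessed 1x3xHxW float tensor; im0: original BGR image (numpy array)
    with torch.no_grad():
        pred = model(img)[0]  # raw YOLO predictions
    det = non_max_suppression(pred, conf_thres=0.4, iou_thres=0.5, classes=[0])[0]  # keep people only
    people = []
    if det is not None and len(det):
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()  # back to im0 pixels
        for *xyxy, conf, cls in det:
            plot_one_box(xyxy, im0, label='person %.2f' % float(conf))  # draw each person box
            people.append(xyxy)
    distancing(people, im0, dist_thres_lim=(200, 250))  # connect nearby pairs and colour by risk
    return im0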