general.py
# General utils

import glob
import logging
import math
import os
import random
import re
import subprocess
import time
from pathlib import Path

import cv2
import numpy as np
import torch
import torchvision
import yaml

from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds

# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads

def set_logging(rank=-1):
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if rank in [-1, 0] else logging.WARN)


def init_seeds(seed=0):
    # Initialize random number generator (RNG) seeds
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''

def check_online():
    # Check internet connectivity
    import socket
    try:
        socket.create_connection(("1.1.1.1", 53))  # check host accessibility
        return True
    except OSError:
        return False

def check_git_status():
    # Recommend 'git pull' if code is out of date
    print(colorstr('github: '), end='')
    try:
        assert Path('.git').exists(), 'skipping check (not a git repository)'
        assert not Path('/workspace').exists(), 'skipping check (Docker image)'  # not Path('/.dockerenv').exists()
        assert check_online(), 'skipping check (offline)'

        cmd = 'git fetch && git config --get remote.origin.url'  # github repo url
        url = subprocess.check_output(cmd, shell=True).decode()[:-1]
        cmd = 'git rev-list $(git rev-parse --abbrev-ref HEAD)..origin/master --count'  # commits behind
        n = int(subprocess.check_output(cmd, shell=True))
        if n > 0:
            print(f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 'commit'}. "
                  f"Use 'git pull' to update or 'git clone {url}' to download latest.")
        else:
            print(f'up to date with {url} ✅')
    except Exception as e:
        print(e)

def check_requirements(file='requirements.txt'):
    # Check installed dependencies meet requirements
    import pkg_resources
    requirements = pkg_resources.parse_requirements(Path(file).open())
    requirements = [x.name + ''.join(*x.specs) if len(x.specs) else x.name for x in requirements]
    pkg_resources.require(requirements)  # DistributionNotFound or VersionConflict exception if requirements not met

def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size
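
# Usage sketch (illustrative, not part of the original file): a size that is not
# a stride multiple is rounded up to the next one, e.g.
#   check_img_size(100, s=32)  # prints the WARNING and returns 128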

def check_file(file):
    # Search for file if not found
    if os.path.isfile(file) or file == '':
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), 'File Not Found: %s' % file  # assert file was found
        assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files)  # assert unique
        return files[0]  # return file

def check_dataset(dict):
    # Download dataset if not found locally
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and len(s):  # download script
                print('Downloading %s ...' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')

def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor


def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)


def one_cycle(y1=0.0, y2=1.0, steps=100):
    # lambda function for sinusoidal ramp from y1 to y2
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
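
# Usage sketch (illustrative, not part of the original file): one_cycle() pairs
# with torch.optim.lr_scheduler.LambdaLR as a cosine learning-rate ramp, e.g.
# with a hypothetical optimizer and epoch count:
#   lf = one_cycle(1, 0.2, epochs)  # multiplier ramps 1.0 -> 0.2 over epochs
#   scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)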

def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
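
# Usage sketch (illustrative, not part of the original file):
#   colorstr('blue', 'bold', 'hello')  # -> '\033[34m\033[1mhello\033[0m'
#   colorstr('hello')                  # single argument defaults to blue + bold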

def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights

def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x

def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
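
# Round-trip sketch (illustrative, not part of the original file):
#   b = torch.tensor([[8., 7., 12., 13.]])  # x1, y1, x2, y2
#   xyxy2xywh(b)             # -> [[10., 10., 4., 6.]] (center x, center y, w, h)
#   xywh2xyxy(xyxy2xywh(b))  # -> recovers [[8., 7., 12., 13.]]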

def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32):
    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x
    y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y
    y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x
    y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y
    return y

def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
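
# Usage sketch (illustrative, not part of the original file): after inference on
# a letterboxed input img, map detections det back onto the original frame im0:
#   det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()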

def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2

def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps

    iou = inter / union
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                    (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / ((1 + eps) - iou + v)
                return iou - (rho2 / c2 + v * alpha)  # CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + eps  # convex area
            return iou - (c_area - union) / c_area  # GIoU
    else:
        return iou  # IoU
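
# Worked example (illustrative, not part of the original file): boxes
# [0, 0, 10, 10] and [5, 5, 15, 15] overlap in a 5x5 patch, so inter = 25,
# union = 100 + 100 - 25 = 175 and IoU = 25 / 175 ~= 0.143:
#   bbox_iou(torch.tensor([0., 0., 10., 10.]), torch.tensor([[5., 5., 15., 15.]]))
# Passing GIoU=True / DIoU=True / CIoU=True returns the penalized variants
# commonly used as box-regression losses.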

def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
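
# Shape sketch (illustrative, not part of the original file): unlike bbox_iou,
# box_iou is all-pairs, e.g. box1 of shape (3, 4) vs box2 of shape (5, 4)
# returns a (3, 5) IoU matrix; the merge-NMS branch below relies on this.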

def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)

def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
        detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output
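
# Usage sketch (illustrative, not part of the original file): for raw model
# output pred of shape (batch, boxes, 5 + nc) holding xywh + objectness + class
# scores,
#   det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0]
# yields an (n, 6) tensor of (x1, y1, x2, y2, conf, cls) rows for the first image.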

def strip_optimizer(f='weights/best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    for key in 'optimizer', 'training_results', 'wandb_id':
        x[key] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))
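
# Usage sketch (illustrative, not part of the original file; the path is
# hypothetical):
#   strip_optimizer('runs/exp0/weights/best.pt')  # overwrite in place: FP16, no optimizer state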

def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        url = 'gs://%s/evolve.txt' % bucket
        if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
            os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sort by fitness

    # Save yaml
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)

    if bucket:
        os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload

def apply_classifier(x, model, img, im0):
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to 3x224x224 CHW
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x

def increment_path(path, exist_ok=True, sep=''):
    # Increment path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3 etc.
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        return f"{path}{sep}{n}"  # update path
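
# Usage sketch (illustrative, not part of the original file): with runs/exp
# already on disk,
#   increment_path('runs/exp', exist_ok=False)  # -> 'runs/exp2', next call 'runs/exp3', ...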