#970 Update YoloNASQuickstart.md

Merged
Ghost merged 1 commit into Deci-AI:master from deci-ai:bugfix/SG-000_fix_readme_yolonas_snippets
import math
import os
import pathlib
import random
from abc import ABC, abstractmethod
from enum import Enum
from typing import Callable, List, Union, Tuple, Optional, Dict

import cv2
import numpy as np
import torch
import torchvision
from omegaconf import ListConfig
from torch import nn
from torch.utils.data.dataloader import default_collate

from super_gradients.common.registry.registry import register_collate_function
from super_gradients.training.utils.visualization.detection import draw_bbox
from super_gradients.training.utils.visualization.utils import generate_color_mapping


class DetectionTargetsFormat(Enum):
    """
    Enum class for the different detection output formats
    When NORMALIZED is not specified - the type refers to unnormalized image coordinates (of the bboxes).
    For example:
        LABEL_NORMALIZED_XYXY means [class_idx, x1, y1, x2, y2]
    """

    LABEL_XYXY = "LABEL_XYXY"
    XYXY_LABEL = "XYXY_LABEL"
    LABEL_NORMALIZED_XYXY = "LABEL_NORMALIZED_XYXY"
    NORMALIZED_XYXY_LABEL = "NORMALIZED_XYXY_LABEL"
    LABEL_CXCYWH = "LABEL_CXCYWH"
    CXCYWH_LABEL = "CXCYWH_LABEL"
    LABEL_NORMALIZED_CXCYWH = "LABEL_NORMALIZED_CXCYWH"
    NORMALIZED_CXCYWH_LABEL = "NORMALIZED_CXCYWH_LABEL"


def get_cls_posx_in_target(target_format: DetectionTargetsFormat) -> int:
    """Get the position of the class id in a given target format
    :param target_format: Representation of the target (ex: LABEL_XYXY)
    :return: Position of the class id in a bbox
             ex: 0 if bbox of format label_xyxy | -1 if bbox of format xyxy_label
    """
    format_split = target_format.value.split("_")
    if format_split[0] == "LABEL":
        return 0
    elif format_split[-1] == "LABEL":
        return -1
    else:
        raise NotImplementedError(f"No implementation to find index of LABEL in {target_format.value}")


def _set_batch_labels_index(labels_batch):
    for i, labels in enumerate(labels_batch):
        labels[:, 0] = i
    return labels_batch


def convert_cxcywh_bbox_to_xyxy(input_bbox: torch.Tensor):
    """
    Converts bounding box format from [cx, cy, w, h] to [x1, y1, x2, y2]
    :param input_bbox: input bbox either 2-dimensional (for all boxes of a single image) or 3-dimensional (for
                       boxes of a batch of images)
    :return: Converted bbox in same dimensions as the original
    """
    need_squeeze = False
    # The input is always processed as a batch. In case it is not a batch, it is unsqueezed, processed and then squeezed back.
    if input_bbox.dim() < 3:
        need_squeeze = True
        input_bbox = input_bbox.unsqueeze(0)
    converted_bbox = torch.zeros_like(input_bbox) if isinstance(input_bbox, torch.Tensor) else np.zeros_like(input_bbox)
    converted_bbox[:, :, 0] = input_bbox[:, :, 0] - input_bbox[:, :, 2] / 2
    converted_bbox[:, :, 1] = input_bbox[:, :, 1] - input_bbox[:, :, 3] / 2
    converted_bbox[:, :, 2] = input_bbox[:, :, 0] + input_bbox[:, :, 2] / 2
    converted_bbox[:, :, 3] = input_bbox[:, :, 1] + input_bbox[:, :, 3] / 2
    # Squeeze back if needed
    if need_squeeze:
        converted_bbox = converted_bbox[0]
    return converted_bbox
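

# Usage sketch (illustrative, not part of the original file): converting a single
# image's boxes from (cx, cy, w, h) to (x1, y1, x2, y2). Values are made up.
#   >>> boxes = torch.tensor([[50.0, 50.0, 20.0, 10.0]])
#   >>> convert_cxcywh_bbox_to_xyxy(boxes)
#   tensor([[40., 45., 60., 55.]])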
def _iou(CIoU: bool, DIoU: bool, GIoU: bool, b1_x1, b1_x2, b1_y1, b1_y2, b2_x1, b2_x2, b2_y1, b2_y2, eps):
    """
    Internal function for the use of calculate_bbox_iou_matrix and calculate_bbox_iou_elementwise functions
    DO NOT CALL THIS FUNCTION DIRECTLY - use one of the functions mentioned above
    """
    # Intersection area
    intersection_area = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
    # Union area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union_area = w1 * h1 + w2 * h2 - intersection_area + eps
    iou = intersection_area / union_area  # iou
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
        if GIoU:
            c_area = cw * ch + eps  # convex area
            iou -= (c_area - union_area) / c_area  # GIoU
        # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
        if DIoU or CIoU:
            # convex diagonal squared
            c2 = cw**2 + ch**2 + eps
            # centerpoint distance squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4
            if DIoU:
                iou -= rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi**2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha = v / ((1 + eps) - iou + v)
                iou -= rho2 / c2 + v * alpha  # CIoU
    return iou


def calculate_bbox_iou_matrix(box1, box2, x1y1x2y2=True, GIoU: bool = False, DIoU=False, CIoU=False, eps=1e-9):
    """
    Calculate an IoU matrix containing the IoU of every pair iou(i, j) where i is in box1 and j is in box2
    :param box1: a 2D tensor of boxes (shape N x 4)
    :param box2: a 2D tensor of boxes (shape M x 4)
    :param x1y1x2y2: boxes format is x1y1x2y2 (True) or xywh where xy is the center (False)
    :return: a 2D iou matrix (shape NxM)
    """
    if box1.dim() > 1:
        box1 = box1.T
    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    else:  # x, y, w, h = box1
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
        b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
    b1_x1, b1_y1, b1_x2, b1_y2 = b1_x1.unsqueeze(1), b1_y1.unsqueeze(1), b1_x2.unsqueeze(1), b1_y2.unsqueeze(1)
    return _iou(CIoU, DIoU, GIoU, b1_x1, b1_x2, b1_y1, b1_y2, b2_x1, b2_x2, b2_y1, b2_y2, eps)
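

# Usage sketch (illustrative): pairwise IoU between two small sets of xyxy boxes.
#   >>> a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
#   >>> b = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])
#   >>> calculate_bbox_iou_matrix(a, b)  # shape (1, 2)
#   tensor([[1.0000, 0.1429]])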
def calc_bbox_iou_matrix(pred: torch.Tensor):
    """
    Calculate the IoU for every pair of boxes in the boxes vector
    :param pred: a 3-dimensional tensor containing all boxes for a batch of images [N, num_boxes, 4], where
                 each box format is [x1, y1, x2, y2]
    :return: a 3-dimensional matrix where M_i_j_k is the IoU of box j and box k of the i'th image in the batch
    """
    box = pred[:, :, :4]
    b1_x1, b1_y1 = box[:, :, 0].unsqueeze(1), box[:, :, 1].unsqueeze(1)
    b1_x2, b1_y2 = box[:, :, 2].unsqueeze(1), box[:, :, 3].unsqueeze(1)
    b2_x1 = b1_x1.transpose(2, 1)
    b2_x2 = b1_x2.transpose(2, 1)
    b2_y1 = b1_y1.transpose(2, 1)
    b2_y2 = b1_y2.transpose(2, 1)
    intersection_area = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
    # Union area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union_area = (w1 * h1 + 1e-16) + w2 * h2 - intersection_area
    ious = intersection_area / union_area
    return ious


def change_bbox_bounds_for_image_size(boxes, img_shape):
    # CLIP XYXY BOUNDING BOXES TO IMAGE SHAPE (HEIGHT, WIDTH)
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=img_shape[1])
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=img_shape[0])
    return boxes


class DetectionPostPredictionCallback(ABC, nn.Module):
    def __init__(self) -> None:
        super().__init__()

    @abstractmethod
    def forward(self, x, device: str):
        """
        :param x: the output of your model
        :param device: the device to move all output tensors into
        :return: a list with length batch_size, each item in the list is a detections tensor
                 with shape: nx6 (x1, y1, x2, y2, confidence, class) where x and y are in range [0,1]
        """
        raise NotImplementedError


class IouThreshold(tuple, Enum):
    MAP_05 = (0.5, 0.5)
    MAP_05_TO_095 = (0.5, 0.95)

    def is_range(self):
        return self[0] != self[1]

    def to_tensor(self):
        if self.is_range():
            n_iou_thresh = int(round((self[1] - self[0]) / 0.05)) + 1
            return torch.linspace(self[0], self[1], n_iou_thresh)
        else:
            return torch.tensor([self[0]])
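

# Usage sketch (illustrative): the COCO-style range expands to 10 thresholds.
#   >>> IouThreshold.MAP_05_TO_095.to_tensor()
#   tensor([0.5000, 0.5500, 0.6000, 0.6500, 0.7000, 0.7500, 0.8000, 0.8500, 0.9000, 0.9500])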
def box_iou(box1: torch.Tensor, box2: torch.Tensor) -> torch.Tensor:
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    :param box1: Tensor of shape [N, 4]
    :param box2: Tensor of shape [M, 4]
    :return: iou, Tensor of shape [N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)
    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)


def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, multi_label_per_box: bool = True, with_confidence: bool = False):
    """
    Performs Non-Maximum Suppression (NMS) on inference results
    :param prediction: raw model prediction. Should be a list of Tensors of shape (cx, cy, w, h, confidence, cls0, cls1, ...)
    :param conf_thres: predictions below the confidence threshold are discarded
    :param iou_thres: IoU threshold for the NMS algorithm
    :param multi_label_per_box: whether to re-use each box with all possible labels
                                (instead of only the maximum-confidence one, all confidences above threshold
                                will be sent to NMS); set to True by default
    :param with_confidence: whether to multiply objectness score with class score.
                            Usually valid for Yolo models only.
    :return: detections with shape nx6 (x1, y1, x2, y2, confidence, class)
    """
    candidates_above_thres = prediction[..., 4] > conf_thres  # filter by confidence
    output = [None] * prediction.shape[0]
    for image_idx, pred in enumerate(prediction):
        pred = pred[candidates_above_thres[image_idx]]  # confident
        if not pred.shape[0]:  # if none remain, process next image
            continue
        if with_confidence:
            pred[:, 5:] *= pred[:, 4:5]  # multiply objectness score with class score
        box = convert_cxcywh_bbox_to_xyxy(pred[:, :4])  # cxcywh to xyxy
        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label_per_box:  # try for all good confidence classes
            i, j = (pred[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            pred = torch.cat((box[i], pred[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = pred[:, 5:].max(1, keepdim=True)
            pred = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
        if not pred.shape[0]:  # if none remain, process next image
            continue
        # Apply torch batched NMS algorithm
        boxes, scores, cls_idx = pred[:, :4], pred[:, 4], pred[:, 5]
        idx_to_keep = torchvision.ops.boxes.batched_nms(boxes, scores, cls_idx, iou_thres)
        output[image_idx] = pred[idx_to_keep]
    return output
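

# Usage sketch (illustrative, with made-up shapes): raw model output of shape
# [batch, num_predictions, 5 + num_classes] in (cx, cy, w, h, obj, cls...) layout.
#   >>> raw = torch.rand(2, 1000, 85)  # e.g. a COCO-style head with 80 classes
#   >>> dets = non_max_suppression(raw, conf_thres=0.25, iou_thres=0.6)
#   >>> len(dets)  # one entry (tensor or None) per image in the batch
#   2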
def matrix_non_max_suppression(
    pred,
    conf_thres: float = 0.1,
    kernel: str = "gaussian",
    sigma: float = 3.0,
    max_num_of_detections: int = 500,
) -> List[torch.Tensor]:
    """Performs Matrix Non-Maximum Suppression (NMS) on inference results https://arxiv.org/pdf/1912.04488.pdf
    :param pred: Raw model prediction (in test mode) - a Tensor of shape [batch, num_predictions, 85]
                 where each item format is (x, y, w, h, object_conf, class_conf, ... 80 class scores ...)
    :param conf_thres: Threshold under which predictions are discarded
    :param kernel: Type of kernel to use ['gaussian', 'linear']
    :param sigma: Sigma for the gaussian kernel
    :param max_num_of_detections: Maximum number of boxes to output
    :return: Detections list with shape (x1, y1, x2, y2, conf, class)
    """
    # MULTIPLY CONF BY CLASS CONF TO GET COMBINED CONFIDENCE
    class_conf, class_pred = pred[:, :, 5:].max(2)
    pred[:, :, 4] *= class_conf
    # BOX (CENTER X, CENTER Y, WIDTH, HEIGHT) TO (X1, Y1, X2, Y2)
    pred[:, :, :4] = convert_cxcywh_bbox_to_xyxy(pred[:, :, :4])
    # DETECTIONS ORDERED AS (x1y1x2y2, obj_conf, class_pred)
    pred = torch.cat((pred[:, :, :5], class_pred.unsqueeze(2)), 2)
    # SORT DETECTIONS BY DECREASING CONFIDENCE SCORES
    sort_ind = (-pred[:, :, 4]).argsort()
    pred = torch.stack([pred[i, sort_ind[i]] for i in range(pred.shape[0])])[:, 0:max_num_of_detections]
    ious = calc_bbox_iou_matrix(pred)
    ious = ious.triu(1)
    # CREATE A LABELS MASK, WE WANT ONLY BOXES WITH THE SAME LABEL TO AFFECT EACH OTHER
    labels = pred[:, :, 5:]
    labels_matrix = (labels == labels.transpose(2, 1)).float().triu(1)
    ious *= labels_matrix
    ious_cmax, _ = ious.max(1)
    ious_cmax = ious_cmax.unsqueeze(2).repeat(1, 1, max_num_of_detections)
    if kernel == "gaussian":
        decay_matrix = torch.exp(-1 * sigma * (ious**2))
        compensate_matrix = torch.exp(-1 * sigma * (ious_cmax**2))
        decay, _ = (decay_matrix / compensate_matrix).min(dim=1)
    else:
        decay = (1 - ious) / (1 - ious_cmax)
        decay, _ = decay.min(dim=1)
    pred[:, :, 4] *= decay
    output = [pred[i, pred[i, :, 4] > conf_thres] for i in range(pred.shape[0])]
    return output


class NMS_Type(str, Enum):
    """
    Type of non-max suppression algorithm that can be used for post-processing detection
    """

    ITERATIVE = "iterative"
    MATRIX = "matrix"


def undo_image_preprocessing(im_tensor: torch.Tensor) -> np.ndarray:
    """
    :param im_tensor: images in a batch after preprocessing for inference, RGB, (B, C, H, W)
    :return: images in a batch in cv2 format, BGR, (B, H, W, C)
    """
    im_np = im_tensor.cpu().numpy()
    im_np = im_np[:, ::-1, :, :].transpose(0, 2, 3, 1)
    im_np *= 255.0
    return np.ascontiguousarray(im_np, dtype=np.uint8)
class DetectionVisualization:
    @staticmethod
    def _generate_color_mapping(num_classes: int) -> List[Tuple[int]]:
        """
        Generate a unique BGR color for each class
        """
        return generate_color_mapping(num_classes=num_classes)

    @staticmethod
    def _draw_box_title(
        color_mapping: List[Tuple[int]],
        class_names: List[str],
        box_thickness: int,
        image_np: np.ndarray,
        x1: int,
        y1: int,
        x2: int,
        y2: int,
        class_id: int,
        pred_conf: float = None,
        is_target: bool = False,
    ):
        color = color_mapping[class_id]
        class_name = class_names[class_id]
        if is_target:
            title = f"[GT] {class_name}"
        else:
            title = f'[Pred] {class_name} {str(round(pred_conf, 2)) if pred_conf is not None else ""}'
        draw_bbox(image=image_np, title=title, x1=x1, y1=y1, x2=x2, y2=y2, box_thickness=box_thickness, color=color)
        return image_np

    @staticmethod
    def _visualize_image(
        image_np: np.ndarray,
        pred_boxes: np.ndarray,
        target_boxes: np.ndarray,
        class_names: List[str],
        box_thickness: int,
        gt_alpha: float,
        image_scale: float,
        checkpoint_dir: str,
        image_name: str,
    ):
        image_np = cv2.resize(image_np, (0, 0), fx=image_scale, fy=image_scale, interpolation=cv2.INTER_NEAREST)
        color_mapping = DetectionVisualization._generate_color_mapping(len(class_names))
        # Draw predictions
        pred_boxes[:, :4] *= image_scale
        for box in pred_boxes:
            image_np = DetectionVisualization._draw_box_title(
                color_mapping, class_names, box_thickness, image_np, *box[:4].astype(int), class_id=int(box[5]), pred_conf=box[4]
            )
        # Draw ground truths
        target_boxes_image = np.zeros_like(image_np, np.uint8)
        for box in target_boxes:
            target_boxes_image = DetectionVisualization._draw_box_title(
                color_mapping, class_names, box_thickness, target_boxes_image, *box[2:], class_id=box[1], is_target=True
            )
        # Transparent overlay of ground truth boxes
        mask = target_boxes_image.astype(bool)
        image_np[mask] = cv2.addWeighted(image_np, 1 - gt_alpha, target_boxes_image, gt_alpha, 0)[mask]
        if checkpoint_dir is None:
            return image_np
        else:
            pathlib.Path(checkpoint_dir).mkdir(parents=True, exist_ok=True)
            cv2.imwrite(os.path.join(checkpoint_dir, str(image_name) + ".jpg"), image_np)

    @staticmethod
    def _scaled_ccwh_to_xyxy(target_boxes: np.ndarray, h: int, w: int, image_scale: float) -> np.ndarray:
        """
        Modifies target_boxes in place
        :param target_boxes: (c1, c2, w, h) boxes in [0, 1] range
        :param h: image height
        :param w: image width
        :param image_scale: desired scale for the boxes w.r.t. w and h
        :return: targets in (x1, y1, x2, y2) format
                 in range [0, w * self.image_scale] [0, h * self.image_scale]
        """
        # Unscale
        target_boxes[:, 2:] *= np.array([[w, h, w, h]])
        # x1 = c1 - w // 2; y1 = c2 - h // 2
        target_boxes[:, 2] -= target_boxes[:, 4] // 2
        target_boxes[:, 3] -= target_boxes[:, 5] // 2
        # x2 = w + x1; y2 = h + y1
        target_boxes[:, 4] += target_boxes[:, 2]
        target_boxes[:, 5] += target_boxes[:, 3]
        target_boxes[:, 2:] *= image_scale
        target_boxes = target_boxes.astype(int)
        return target_boxes

    @staticmethod
    def visualize_batch(
        image_tensor: torch.Tensor,
        pred_boxes: List[torch.Tensor],
        target_boxes: torch.Tensor,
        batch_name: Union[int, str],
        class_names: List[str],
        checkpoint_dir: str = None,
        undo_preprocessing_func: Callable[[torch.Tensor], np.ndarray] = undo_image_preprocessing,
        box_thickness: int = 2,
        image_scale: float = 1.0,
        gt_alpha: float = 0.4,
    ):
        """
        A helper function to visualize detections predicted by a network:
        saves images into a given path with a name that is {batch_name}_{image_idx_in_the_batch}.jpg, one batch per call.
        Colors are generated on the fly: uniformly sampled from the color wheel to support all given classes.
        Adjustable:
            * Ground truth box transparency;
            * Box width;
            * Image size (larger or smaller than what's provided)
        :param image_tensor: rgb images, (B, H, W, 3)
        :param pred_boxes: boxes after NMS for each image in a batch, each (Num_boxes, 6),
                           values on dim 1 are: x1, y1, x2, y2, confidence, class
        :param target_boxes: (Num_targets, 6), values on dim 1 are: image id in a batch, class, x y w h
                             (coordinates scaled to [0, 1])
        :param batch_name: id of the current batch to use for image naming
        :param class_names: names of all classes, each on its own index
        :param checkpoint_dir: a path where images with boxes will be saved. If None, the result images will
                               be returned as a list of numpy image arrays
        :param undo_preprocessing_func: a function to convert preprocessed images tensor into a batch of cv2-like images
        :param box_thickness: box line thickness in px
        :param image_scale: scale of an image w.r.t. given image size,
                            e.g. incoming images are (320x320), use scale = 2. to preview in (640x640)
        :param gt_alpha: a value in [0., 1.] for transparency of the ground truth boxes,
                         0 for invisible, 1 for fully opaque
        """
        image_np = undo_preprocessing_func(image_tensor.detach())
        targets = DetectionVisualization._scaled_ccwh_to_xyxy(target_boxes.detach().cpu().numpy(), *image_np.shape[1:3], image_scale)
        out_images = []
        for i in range(image_np.shape[0]):
            preds = pred_boxes[i].detach().cpu().numpy() if pred_boxes[i] is not None else np.empty((0, 6))
            targets_cur = targets[targets[:, 0] == i]
            image_name = "_".join([str(batch_name), str(i)])
            res_image = DetectionVisualization._visualize_image(
                image_np[i], preds, targets_cur, class_names, box_thickness, gt_alpha, image_scale, checkpoint_dir, image_name
            )
            if res_image is not None:
                out_images.append(res_image)
        return out_images
class Anchors(nn.Module):
    """
    A wrapper module to hold the anchors used by detection models such as Yolo
    """

    def __init__(self, anchors_list: List[List], strides: List[int]):
        """
        :param anchors_list: of the shape [[w1,h1,w2,h2,w3,h3], [w4,h4,w5,h5,w6,h6], ...] where each sublist holds
                             the width and height of the anchors of a specific detection layer.
                             i.e. for a model with 3 detection layers, each containing 5 anchors, the format will be 3 sublists of 10 numbers each.
                             The width and height are in pixels (not relative to image size)
        :param strides: a list containing the stride of the layers from which the detection heads are fed.
                        i.e. if the first detection head is connected to the backbone after the input dimensions were reduced by 8, the first number will be 8
        """
        super().__init__()
        self.__anchors_list = anchors_list
        self.__strides = strides
        self._check_all_lists(anchors_list)
        self._check_all_len_equal_and_even(anchors_list)
        self._stride = nn.Parameter(torch.Tensor(strides).float(), requires_grad=False)
        anchors = torch.Tensor(anchors_list).float().view(len(anchors_list), -1, 2)
        self._anchors = nn.Parameter(anchors / self._stride.view(-1, 1, 1), requires_grad=False)
        self._anchor_grid = nn.Parameter(anchors.clone().view(len(anchors_list), 1, -1, 1, 1, 2), requires_grad=False)

    @staticmethod
    def _check_all_lists(anchors: list) -> None:
        for a in anchors:
            if not isinstance(a, (list, ListConfig)):
                raise RuntimeError("All objects of anchors_list must be lists")

    @staticmethod
    def _check_all_len_equal_and_even(anchors: list) -> None:
        len_of_first = len(anchors[0])
        for a in anchors:
            if len(a) % 2 == 1 or len(a) != len_of_first:
                raise RuntimeError("All objects of anchors_list must be of the same even length")

    @property
    def stride(self) -> nn.Parameter:
        return self._stride

    @property
    def anchors(self) -> nn.Parameter:
        return self._anchors

    @property
    def anchor_grid(self) -> nn.Parameter:
        return self._anchor_grid

    @property
    def detection_layers_num(self) -> int:
        return self._anchors.shape[0]

    @property
    def num_anchors(self) -> int:
        return self._anchors.shape[1]

    def __repr__(self):
        return f"anchors_list: {self.__anchors_list} strides: {self.__strides}"
def xyxy2cxcywh(bboxes):
    """
    Transforms bboxes from xyxy format to (center x, center y, w, h) format
    :param bboxes: array, shaped (nboxes, 4)
    :return: modified bboxes
    """
    bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]
    bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]
    bboxes[:, 0] = bboxes[:, 0] + bboxes[:, 2] * 0.5
    bboxes[:, 1] = bboxes[:, 1] + bboxes[:, 3] * 0.5
    return bboxes


def cxcywh2xyxy(bboxes):
    """
    Transforms bboxes from (center x, center y, w, h) format to xyxy format
    :param bboxes: array, shaped (nboxes, 4)
    :return: modified bboxes
    """
    bboxes[:, 1] = bboxes[:, 1] - bboxes[:, 3] * 0.5
    bboxes[:, 0] = bboxes[:, 0] - bboxes[:, 2] * 0.5
    bboxes[:, 3] = bboxes[:, 3] + bboxes[:, 1]
    bboxes[:, 2] = bboxes[:, 2] + bboxes[:, 0]
    return bboxes
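

# Round-trip sketch (illustrative; note both functions modify their input in place):
#   >>> boxes = np.array([[40.0, 45.0, 60.0, 55.0]])  # xyxy
#   >>> xyxy2cxcywh(boxes)
#   array([[50., 50., 20., 10.]])
#   >>> cxcywh2xyxy(boxes)
#   array([[40., 45., 60., 55.]])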
def get_mosaic_coordinate(mosaic_index, xc, yc, w, h, input_h, input_w):
    """
    Returns the mosaic coordinates of the final mosaic image according to the mosaic image index.
    :param mosaic_index: (int) mosaic image index
    :param xc: (int) center x coordinate of the entire mosaic grid.
    :param yc: (int) center y coordinate of the entire mosaic grid.
    :param w: (int) width of bbox
    :param h: (int) height of bbox
    :param input_h: (int) image input height (should be 1/2 of the final mosaic output image height).
    :param input_w: (int) image input width (should be 1/2 of the final mosaic output image width).
    :return: (x1, y1, x2, y2), (x1s, y1s, x2s, y2s) where (x1, y1, x2, y2) are the coordinates in the final mosaic
             output image, and (x1s, y1s, x2s, y2s) are the coordinates in the placed image.
    """
    # index 0 to top-left part of image
    if mosaic_index == 0:
        x1, y1, x2, y2 = max(xc - w, 0), max(yc - h, 0), xc, yc
        small_coord = w - (x2 - x1), h - (y2 - y1), w, h
    # index 1 to top-right part of image
    elif mosaic_index == 1:
        x1, y1, x2, y2 = xc, max(yc - h, 0), min(xc + w, input_w * 2), yc
        small_coord = 0, h - (y2 - y1), min(w, x2 - x1), h
    # index 2 to bottom-left part of image
    elif mosaic_index == 2:
        x1, y1, x2, y2 = max(xc - w, 0), yc, xc, min(input_h * 2, yc + h)
        small_coord = w - (x2 - x1), 0, w, min(y2 - y1, h)
    # index 3 to bottom-right part of image
    elif mosaic_index == 3:
        x1, y1, x2, y2 = xc, yc, min(xc + w, input_w * 2), min(input_h * 2, yc + h)  # noqa
        small_coord = 0, 0, min(w, x2 - x1), min(y2 - y1, h)
    return (x1, y1, x2, y2), small_coord
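

# Placement sketch (illustrative, made-up numbers): placing a 300x200 image as the
# top-left tile of a 640x640 mosaic centered at (320, 320).
#   >>> get_mosaic_coordinate(0, xc=320, yc=320, w=300, h=200, input_h=320, input_w=320)
#   ((20, 120, 320, 320), (0, 0, 300, 200))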
def adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max):
    """
    Adjusts the bbox annotations of a rescaled, padded image.
    :param bbox: (np.array) bbox to modify.
    :param scale_ratio: (float) scale ratio between the rescaled output image and the original one.
    :param padw: (int) width padding size.
    :param padh: (int) height padding size.
    :param w_max: (int) width border.
    :param h_max: (int) height border.
    :return: modified bbox (np.array)
    """
    bbox[:, 0::2] = np.clip(bbox[:, 0::2] * scale_ratio + padw, 0, w_max)
    bbox[:, 1::2] = np.clip(bbox[:, 1::2] * scale_ratio + padh, 0, h_max)
    return bbox


@register_collate_function()
class DetectionCollateFN:
    """
    Collate function for Yolox training
    """

    def __call__(self, data) -> Tuple[torch.Tensor, torch.Tensor]:
        batch = default_collate(data)
        ims, targets = batch[0:2]
        return ims, self._format_targets(targets)

    def _format_targets(self, targets: torch.Tensor) -> torch.Tensor:
        nlabel = (targets.sum(dim=2) > 0).sum(dim=1)  # number of labels per image
        targets_merged = []
        for i in range(targets.shape[0]):
            targets_im = targets[i, : nlabel[i]]
            batch_column = targets.new_ones((targets_im.shape[0], 1)) * i
            targets_merged.append(torch.cat((batch_column, targets_im), 1))
        return torch.cat(targets_merged, 0)
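

# Usage sketch (illustrative; `my_dataset` is a hypothetical dataset yielding
# (image, padded_targets) pairs, where empty target rows are all-zero):
#   >>> from torch.utils.data import DataLoader
#   >>> loader = DataLoader(my_dataset, batch_size=16, collate_fn=DetectionCollateFN())
# The collate flattens the per-image padded targets into one (total_num_targets, 6)
# tensor whose first column is the image index within the batch.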
class PPYoloECollateFN:
    """
    Collate function for PPYoloE training
    """

    def __init__(self, random_resize_sizes: Union[List[int], None] = None, random_resize_modes: Union[List[int], None] = None):
        """
        :param random_resize_sizes: list of sizes to randomly resize the batch to (one size is chosen per batch)
        :param random_resize_modes: list of interpolation modes to randomly choose from
        """
        self.random_resize_sizes = random_resize_sizes
        self.random_resize_modes = random_resize_modes

    def __repr__(self):
        return f"PPYoloECollateFN(random_resize_sizes={self.random_resize_sizes}, random_resize_modes={self.random_resize_modes})"

    def __str__(self):
        return self.__repr__()

    def __call__(self, data) -> Tuple[torch.Tensor, torch.Tensor]:
        if self.random_resize_sizes is not None:
            data = self.random_resize(data)
        batch = default_collate(data)
        ims, targets = batch
        targets = self._format_targets(targets)
        ims = torch.moveaxis(ims, -1, 1).float()
        return ims, targets

    def random_resize(self, batch):
        target_size = random.choice(self.random_resize_sizes)
        interpolation = random.choice(self.random_resize_modes)
        batch = [self.random_resize_sample(sample, target_size, interpolation) for sample in batch]
        return batch

    def random_resize_sample(self, sample, target_size, interpolation):
        if len(sample) == 2:
            image, targets = sample  # TARGETS ARE IN LABEL_CXCYWH
            with_crowd = False
        elif len(sample) == 3:
            image, targets, crowd_targets = sample
            with_crowd = True
        else:
            raise RuntimeError(f"Expected a sample of length 2 or 3, got {len(sample)}")
        dsize = int(target_size), int(target_size)
        scale_factors = target_size / image.shape[0], target_size / image.shape[1]
        image = cv2.resize(
            image,
            dsize=dsize,
            interpolation=interpolation,
        )
        sy, sx = scale_factors
        targets[:, 1:5] *= np.array([[sx, sy, sx, sy]], dtype=targets.dtype)
        if with_crowd:
            crowd_targets[:, 1:5] *= np.array([[sx, sy, sx, sy]], dtype=targets.dtype)
            return image, targets, crowd_targets
        return image, targets

    def _format_targets(self, targets: torch.Tensor) -> torch.Tensor:
        """
        :param targets: per-image padded targets, where empty target rows are all-zero
        :return: Tensor of shape [total_num_targets, 6], where the 6 elements are (index, c, cx, cy, w, h)
        """
        # Same collate as in YoloX. We convert to PPYoloTargets in the loss
        nlabel = (targets.sum(dim=2) > 0).sum(dim=1)  # number of labels per image
        targets_merged = []
        for i in range(targets.shape[0]):
            targets_im = targets[i, : nlabel[i]]
            batch_column = targets.new_ones((targets_im.shape[0], 1)) * i
            targets_merged.append(torch.cat((batch_column, targets_im), 1))
        return torch.cat(targets_merged, 0)
class CrowdDetectionPPYoloECollateFN(PPYoloECollateFN):
    """
    Collate function for PPYoloE training with additional_batch_items that includes crowd targets
    """

    def __call__(self, data) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, torch.Tensor]]:
        if self.random_resize_sizes is not None:
            data = self.random_resize(data)
        batch = default_collate(data)
        ims, targets, crowd_targets = batch
        if ims.shape[3] == 3:
            ims = torch.moveaxis(ims, -1, 1).float()
        return ims, self._format_targets(targets), {"crowd_targets": self._format_targets(crowd_targets)}


@register_collate_function()
class CrowdDetectionCollateFN(DetectionCollateFN):
    """
    Collate function for Yolox training with additional_batch_items that includes crowd targets
    """

    def __call__(self, data) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, torch.Tensor]]:
        batch = default_collate(data)
        ims, targets, crowd_targets = batch[0:3]
        return ims, self._format_targets(targets), {"crowd_targets": self._format_targets(crowd_targets)}
def compute_box_area(box: torch.Tensor) -> torch.Tensor:
    """
    Compute the area of one or many boxes.
    :param box: One or many boxes, shape = (4, ?), each box in format (x1, y1, x2, y2)
    :return: Area of every box, shape = (1, ?)
    """
    # box = 4xn
    return (box[2] - box[0]) * (box[3] - box[1])


def crowd_ioa(det_box: torch.Tensor, crowd_box: torch.Tensor) -> torch.Tensor:
    """
    Return intersection-over-detection_area of boxes, used for crowd ground truths.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    :param det_box: Tensor of shape [N, 4]
    :param crowd_box: Tensor of shape [M, 4]
    :return: crowd_ioa, Tensor of shape [N, M]: the NxM matrix containing the pairwise IoA values for every element in det_box and crowd_box
    """
    det_area = compute_box_area(det_box.T)
    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(det_box[:, None, 2:], crowd_box[:, 2:]) - torch.max(det_box[:, None, :2], crowd_box[:, :2])).clamp(0).prod(2)
    return inter / det_area[:, None]  # crowd_ioa = inter / det_area
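

# Sanity-check sketch (illustrative): a detection fully inside a crowd region has IoA 1.
#   >>> det = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
#   >>> crowd = torch.tensor([[0.0, 0.0, 20.0, 20.0]])
#   >>> crowd_ioa(det, crowd)
#   tensor([[1.]])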
def compute_detection_matching(
    output: List[torch.Tensor],
    targets: torch.Tensor,
    height: int,
    width: int,
    iou_thresholds: torch.Tensor,
    denormalize_targets: bool,
    device: str,
    crowd_targets: Optional[torch.Tensor] = None,
    top_k: int = 100,
    return_on_cpu: bool = True,
) -> List[Tuple]:
    """
    Match predictions (NMS output) and the targets (ground truth) with respect to IoU and confidence score.
    :param output: list (of length batch_size) of Tensors of shape (num_predictions, 6)
                   format: (x1, y1, x2, y2, confidence, class_label) where x1,y1,x2,y2 are according to image size
    :param targets: targets for all images of shape (total_num_targets, 6)
                    format: (index, x, y, w, h, label) where x,y,w,h are in range [0,1]
    :param height: dimensions of the image
    :param width: dimensions of the image
    :param iou_thresholds: Threshold to compute the mAP
    :param device: Device
    :param crowd_targets: crowd targets for all images of shape (total_num_crowd_targets, 6)
                          format: (index, x, y, w, h, label) where x,y,w,h are in range [0,1]
    :param top_k: Number of predictions to keep per class, ordered by confidence score
    :param denormalize_targets: If True, denormalize the targets and crowd_targets
    :param return_on_cpu: If True, the output will be returned on "CPU", otherwise it will be returned on "device"
    :return: list of the following tensors, for every image:
        :preds_matched: Tensor of shape (num_img_predictions, n_iou_thresholds)
                        True when prediction (i) is matched with a target with respect to the (j)th IoU threshold
        :preds_to_ignore: Tensor of shape (num_img_predictions, n_iou_thresholds)
                          True when prediction (i) is matched with a crowd target with respect to the (j)th IoU threshold
        :preds_scores: Tensor of shape (num_img_predictions), confidence score for every prediction
        :preds_cls: Tensor of shape (num_img_predictions), predicted class for every prediction
        :targets_cls: Tensor of shape (num_img_targets), ground truth class for every target
    """
    output = map(lambda tensor: None if tensor is None else tensor.to(device), output)
    targets, iou_thresholds = targets.to(device), iou_thresholds.to(device)
    # If crowd_targets is not provided, we patch it with an empty tensor
    crowd_targets = torch.zeros(size=(0, 6), device=device) if crowd_targets is None else crowd_targets.to(device)
    batch_metrics = []
    for img_i, img_preds in enumerate(output):
        # If img_preds is None (no prediction for this image), we patch it with an empty tensor
        img_preds = img_preds if img_preds is not None else torch.zeros(size=(0, 6), device=device)
        img_targets = targets[targets[:, 0] == img_i, 1:]
        img_crowd_targets = crowd_targets[crowd_targets[:, 0] == img_i, 1:]
        img_matching_tensors = compute_img_detection_matching(
            preds=img_preds,
            targets=img_targets,
            crowd_targets=img_crowd_targets,
            denormalize_targets=denormalize_targets,
            height=height,
            width=width,
            device=device,
            iou_thresholds=iou_thresholds,
            top_k=top_k,
            return_on_cpu=return_on_cpu,
        )
        batch_metrics.append(img_matching_tensors)
    return batch_metrics
def compute_img_detection_matching(
    preds: torch.Tensor,
    targets: torch.Tensor,
    crowd_targets: torch.Tensor,
    height: int,
    width: int,
    iou_thresholds: torch.Tensor,
    device: str,
    denormalize_targets: bool,
    top_k: int = 100,
    return_on_cpu: bool = True,
) -> Tuple:
    """
    Match predictions (NMS output) and the targets (ground truth) with respect to IoU and confidence score
    for a given image.
    :param preds: Tensor of shape (num_img_predictions, 6)
                  format: (x1, y1, x2, y2, confidence, class_label) where x1,y1,x2,y2 are according to image size
    :param targets: targets for this image of shape (num_img_targets, 5)
                    format: (label, cx, cy, w, h) where cx,cy,w,h are in range [0,1]
    :param height: dimensions of the image
    :param width: dimensions of the image
    :param iou_thresholds: Threshold to compute the mAP
    :param crowd_targets: crowd targets for all images of shape (total_num_crowd_targets, 6)
                          format: (index, x, y, w, h, label) where x,y,w,h are in range [0,1]
    :param top_k: Number of predictions to keep per class, ordered by confidence score
    :param device: Device
    :param denormalize_targets: If True, denormalize the targets and crowd_targets
    :param return_on_cpu: If True, the output will be returned on "CPU", otherwise it will be returned on "device"
    :return:
        :preds_matched: Tensor of shape (num_img_predictions, n_iou_thresholds)
                        True when prediction (i) is matched with a target with respect to the (j)th IoU threshold
        :preds_to_ignore: Tensor of shape (num_img_predictions, n_iou_thresholds)
                          True when prediction (i) is matched with a crowd target with respect to the (j)th IoU threshold
        :preds_scores: Tensor of shape (num_img_predictions), confidence score for every prediction
        :preds_cls: Tensor of shape (num_img_predictions), predicted class for every prediction
        :targets_cls: Tensor of shape (num_img_targets), ground truth class for every target
    """
    num_iou_thresholds = len(iou_thresholds)
    if preds is None or len(preds) == 0:
        if return_on_cpu:
            device = "cpu"
        preds_matched = torch.zeros((0, num_iou_thresholds), dtype=torch.bool, device=device)
        preds_to_ignore = torch.zeros((0, num_iou_thresholds), dtype=torch.bool, device=device)
        preds_scores = torch.tensor([], dtype=torch.float32, device=device)
        preds_cls = torch.tensor([], dtype=torch.float32, device=device)
        targets_cls = targets[:, 0].to(device=device)
        return preds_matched, preds_to_ignore, preds_scores, preds_cls, targets_cls
    preds_matched = torch.zeros(len(preds), num_iou_thresholds, dtype=torch.bool, device=device)
    targets_matched = torch.zeros(len(targets), num_iou_thresholds, dtype=torch.bool, device=device)
    preds_to_ignore = torch.zeros(len(preds), num_iou_thresholds, dtype=torch.bool, device=device)
    preds_cls, preds_box, preds_scores = preds[:, -1], preds[:, 0:4], preds[:, 4]
    targets_cls, targets_box = targets[:, 0], targets[:, 1:5]
    crowd_targets_cls, crowd_target_box = crowd_targets[:, 0], crowd_targets[:, 1:5]
    # Ignore all but the predictions that were top_k for their class
    preds_idx_to_use = get_top_k_idx_per_cls(preds_scores, preds_cls, top_k)
    preds_to_ignore[:, :] = True
    preds_to_ignore[preds_idx_to_use] = False
    if len(targets) > 0 or len(crowd_targets) > 0:
        # CHANGE bboxes TO FIT THE IMAGE SIZE
        change_bbox_bounds_for_image_size(preds, (height, width))
        targets_box = cxcywh2xyxy(targets_box)
        crowd_target_box = cxcywh2xyxy(crowd_target_box)
        if denormalize_targets:
            targets_box[:, [0, 2]] *= width
            targets_box[:, [1, 3]] *= height
            crowd_target_box[:, [0, 2]] *= width
            crowd_target_box[:, [1, 3]] *= height
    if len(targets) > 0:
        # shape = (n_preds x n_targets)
        iou = box_iou(preds_box[preds_idx_to_use], targets_box)
        # Fill IoU values at index (i, j) with 0 when the prediction (i) and target (j) are of different class
        # Filling with 0 is equivalent to ignoring these values since we want IoU > iou_threshold > 0
        cls_mismatch = preds_cls[preds_idx_to_use].view(-1, 1) != targets_cls.view(1, -1)
        iou[cls_mismatch] = 0
        # The matching priority is first detection confidence and then IoU value.
        # The detections are already sorted by confidence in NMS, so here for each prediction we order the targets by IoU.
        sorted_iou, target_sorted = iou.sort(descending=True, stable=True)
        # Only iterate over IoU values higher than min threshold to speed up the process
        for pred_selected_i, target_sorted_i in (sorted_iou > iou_thresholds[0]).nonzero(as_tuple=False):
            # pred_selected_i and target_sorted_i are relative to filters/sorting, so we extract their absolute indexes
            pred_i = preds_idx_to_use[pred_selected_i]
            target_i = target_sorted[pred_selected_i, target_sorted_i]
            # Vector[j], True when IoU(pred_i, target_i) is above the (j)th threshold
            is_iou_above_threshold = sorted_iou[pred_selected_i, target_sorted_i] > iou_thresholds
            # Vector[j], True when both pred_i and target_i are not matched yet for the (j)th threshold
            are_candidates_free = torch.logical_and(~preds_matched[pred_i, :], ~targets_matched[target_i, :])
            # Vector[j], True when (pred_i, target_i) can be matched for the (j)th threshold
            are_candidates_good = torch.logical_and(is_iou_above_threshold, are_candidates_free)
            # For every threshold (j) where target_i and pred_i can be matched together (are_candidates_good[j]==True),
            # fill the matching placeholders with True
            targets_matched[target_i, are_candidates_good] = True
            preds_matched[pred_i, are_candidates_good] = True
            # When all the targets are matched with a prediction for every IoU threshold, stop.
            if targets_matched.all():
                break
    # Crowd targets can be matched with many predictions.
    # Therefore, for every prediction we just need to check if it has IoA large enough with any crowd target.
    if len(crowd_targets) > 0:
        # shape = (n_preds_to_use x n_crowd_targets)
        ioa = crowd_ioa(preds_box[preds_idx_to_use], crowd_target_box)
        # Fill IoA values at index (i, j) with 0 when the prediction (i) and target (j) are of different class
        # Filling with 0 is equivalent to ignoring these values since we want IoA > threshold > 0
        cls_mismatch = preds_cls[preds_idx_to_use].view(-1, 1) != crowd_targets_cls.view(1, -1)
        ioa[cls_mismatch] = 0
        # For each prediction, we keep its highest score with any crowd target (of same class)
        # shape = (n_preds_to_use)
        best_ioa, _ = ioa.max(1)
        # If a prediction has IoA higher than threshold (with any target of same class), then there is a match
        # shape = (n_preds_to_use x iou_thresholds)
        is_matching_with_crowd = best_ioa.view(-1, 1) > iou_thresholds.view(1, -1)
        preds_to_ignore[preds_idx_to_use] = torch.logical_or(preds_to_ignore[preds_idx_to_use], is_matching_with_crowd)
    if return_on_cpu:
        preds_matched = preds_matched.to("cpu")
        preds_to_ignore = preds_to_ignore.to("cpu")
        preds_scores = preds_scores.to("cpu")
        preds_cls = preds_cls.to("cpu")
        targets_cls = targets_cls.to("cpu")
    return preds_matched, preds_to_ignore, preds_scores, preds_cls, targets_cls
def get_top_k_idx_per_cls(preds_scores: torch.Tensor, preds_cls: torch.Tensor, top_k: int):
    """Get the indexes of all the top k predictions for every class
    :param preds_scores: The confidence scores, vector of shape (n_pred)
    :param preds_cls: The predicted class, vector of shape (n_pred)
    :param top_k: Number of predictions to keep per class, ordered by confidence score
    :return top_k_idx: Indexes of the top k predictions. length <= (k * n_unique_class)
    """
    n_unique_cls = torch.max(preds_cls)
    mask = preds_cls.view(-1, 1) == torch.arange(n_unique_cls + 1, device=preds_scores.device).view(1, -1)
    preds_scores_per_cls = preds_scores.view(-1, 1) * mask
    sorted_scores_per_cls, sorting_idx = preds_scores_per_cls.sort(0, descending=True)
    idx_with_satisfying_scores = sorted_scores_per_cls[:top_k, :].nonzero(as_tuple=False)
    top_k_idx = sorting_idx[idx_with_satisfying_scores.split(1, dim=1)]
    return top_k_idx.view(-1)
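

# Usage sketch (illustrative): keep only the best prediction per class.
#   >>> scores = torch.tensor([0.9, 0.8, 0.7])
#   >>> classes = torch.tensor([0.0, 0.0, 1.0])
#   >>> get_top_k_idx_per_cls(scores, classes, top_k=1)
#   tensor([0, 2])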
def compute_detection_metrics(
    preds_matched: torch.Tensor,
    preds_to_ignore: torch.Tensor,
    preds_scores: torch.Tensor,
    preds_cls: torch.Tensor,
    targets_cls: torch.Tensor,
    device: str,
    recall_thresholds: Optional[torch.Tensor] = None,
    score_threshold: Optional[float] = 0.1,
) -> Tuple:
    """
    Compute the list of precision, recall, mAP and f1 for every recall IoU threshold and for every class.
    :param preds_matched: Tensor of shape (num_predictions, n_iou_thresholds)
                          True when prediction (i) is matched with a target with respect to the (j)th IoU threshold
    :param preds_to_ignore: Tensor of shape (num_predictions, n_iou_thresholds)
                            True when prediction (i) is matched with a crowd target with respect to the (j)th IoU threshold
    :param preds_scores: Tensor of shape (num_predictions), confidence score for every prediction
    :param preds_cls: Tensor of shape (num_predictions), predicted class for every prediction
    :param targets_cls: Tensor of shape (num_targets), ground truth class for every target box to be detected
    :param recall_thresholds: Recall thresholds used to compute mAP.
    :param score_threshold: Minimum confidence score to consider a prediction for the computation of
                            precision, recall and f1 (not mAP)
    :param device: Device
    :return:
        :ap, precision, recall, f1: Tensors of shape (n_class, nb_iou_thrs)
        :unique_classes: Vector with all unique target classes
    """
    preds_matched, preds_to_ignore = preds_matched.to(device), preds_to_ignore.to(device)
    preds_scores, preds_cls, targets_cls = preds_scores.to(device), preds_cls.to(device), targets_cls.to(device)
    recall_thresholds = torch.linspace(0, 1, 101, device=device) if recall_thresholds is None else recall_thresholds.to(device)
    unique_classes = torch.unique(targets_cls)
    n_class, nb_iou_thrs = len(unique_classes), preds_matched.shape[-1]
    ap = torch.zeros((n_class, nb_iou_thrs), device=device)
    precision = torch.zeros((n_class, nb_iou_thrs), device=device)
    recall = torch.zeros((n_class, nb_iou_thrs), device=device)
    for cls_i, cls in enumerate(unique_classes):
        cls_preds_idx, cls_targets_idx = (preds_cls == cls), (targets_cls == cls)
        cls_ap, cls_precision, cls_recall = compute_detection_metrics_per_cls(
            preds_matched=preds_matched[cls_preds_idx],
            preds_to_ignore=preds_to_ignore[cls_preds_idx],
            preds_scores=preds_scores[cls_preds_idx],
            n_targets=cls_targets_idx.sum(),
            recall_thresholds=recall_thresholds,
            score_threshold=score_threshold,
            device=device,
        )
        ap[cls_i, :] = cls_ap
        precision[cls_i, :] = cls_precision
        recall[cls_i, :] = cls_recall
    f1 = 2 * precision * recall / (precision + recall + 1e-16)
    return ap, precision, recall, f1, unique_classes
def compute_detection_metrics_per_cls(
    preds_matched: torch.Tensor,
    preds_to_ignore: torch.Tensor,
    preds_scores: torch.Tensor,
    n_targets: int,
    recall_thresholds: torch.Tensor,
    score_threshold: float,
    device: str,
):
    """
    Compute the list of precision, recall and mAP of a given class for every recall IoU threshold.
    :param preds_matched: Tensor of shape (num_predictions, n_iou_thresholds)
                          True when prediction (i) is matched with a target
                          with respect to the (j)th IoU threshold
    :param preds_to_ignore: Tensor of shape (num_predictions, n_iou_thresholds)
                            True when prediction (i) is matched with a crowd target
                            with respect to the (j)th IoU threshold
    :param preds_scores: Tensor of shape (num_predictions), confidence score for every prediction
    :param n_targets: Number of target boxes of this class
    :param recall_thresholds: Tensor of shape (max_n_rec_thresh), list of recall thresholds used to compute mAP
    :param score_threshold: Minimum confidence score to consider a prediction for the computation of
                            precision and recall (not mAP)
    :param device: Device
    :return ap, precision, recall: Tensors of shape (nb_iou_thrs)
    """
    nb_iou_thrs = preds_matched.shape[-1]
    tps = preds_matched
    fps = torch.logical_and(torch.logical_not(preds_matched), torch.logical_not(preds_to_ignore))
    if len(tps) == 0:
        return torch.zeros(nb_iou_thrs, device=device), torch.zeros(nb_iou_thrs, device=device), torch.zeros(nb_iou_thrs, device=device)
    # Sort by decreasing score
    dtype = torch.uint8 if preds_scores.is_cuda and preds_scores.dtype is torch.bool else preds_scores.dtype
    sort_ind = torch.argsort(preds_scores.to(dtype), descending=True)
    tps = tps[sort_ind, :]
    fps = fps[sort_ind, :]
    preds_scores = preds_scores[sort_ind].contiguous()
    # Rolling sum over the predictions
    rolling_tps = torch.cumsum(tps, axis=0, dtype=torch.float)
    rolling_fps = torch.cumsum(fps, axis=0, dtype=torch.float)
    rolling_recalls = rolling_tps / n_targets
    rolling_precisions = rolling_tps / (rolling_tps + rolling_fps + torch.finfo(torch.float64).eps)
    # Reversed cummax to only have decreasing values
    rolling_precisions = rolling_precisions.flip(0).cummax(0).values.flip(0)
    # ==================
    # RECALL & PRECISION
    # We want the rolling precision/recall at index i so that: preds_scores[i-1] >= score_threshold > preds_scores[i]
    # Note: torch.searchsorted works on an increasing sequence and preds_scores is decreasing, so we work with "-"
    lowest_score_above_threshold = torch.searchsorted(-preds_scores, -score_threshold, right=False)
    if lowest_score_above_threshold == 0:  # Here score_threshold > preds_scores[0], so no pred is above the threshold
        recall = torch.zeros(nb_iou_thrs, device=device)
        precision = torch.zeros(nb_iou_thrs, device=device)  # precision is not really defined when there is no pred, but we need to give it a value
    else:
        recall = rolling_recalls[lowest_score_above_threshold - 1]
        precision = rolling_precisions[lowest_score_above_threshold - 1]
    # ==================
    # AVERAGE PRECISION
    # shape = (nb_iou_thrs, n_recall_thresholds)
    recall_thresholds = recall_thresholds.view(1, -1).repeat(nb_iou_thrs, 1)
    # We want the index i so that: rolling_recalls[i-1] < recall_thresholds[k] <= rolling_recalls[i]
    # Note: when recall_thresholds[k] > max(rolling_recalls), i = len(rolling_recalls)
    # Note2: we work with the transpose (.T) to apply torch.searchsorted on the first dim instead of the last one
    recall_threshold_idx = torch.searchsorted(rolling_recalls.T.contiguous(), recall_thresholds, right=False).T
    # When recall_thresholds[k] > max(rolling_recalls), rolling_precisions[i] is not defined, and we want precision = 0
    rolling_precisions = torch.cat((rolling_precisions, torch.zeros(1, nb_iou_thrs, device=device)), dim=0)
    # shape = (n_recall_thresholds, nb_iou_thrs)
    sampled_precision_points = torch.gather(input=rolling_precisions, index=recall_threshold_idx, dim=0)
    # Average over the recall_thresholds
    ap = sampled_precision_points.mean(0)
    return ap, precision, recall
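

# End-to-end sketch (illustrative; names and shapes are assumptions, not outputs
# verified against this file): matching followed by per-class metrics.
#   >>> iou_thrs = IouThreshold.MAP_05_TO_095.to_tensor()
#   >>> matching = compute_detection_matching(
#   ...     output=nms_output,      # hypothetical: list of (n_preds, 6) tensors from NMS
#   ...     targets=batch_targets,  # hypothetical: (total_num_targets, 6), normalized cxcywh
#   ...     height=640, width=640,
#   ...     iou_thresholds=iou_thrs,
#   ...     denormalize_targets=True,
#   ...     device="cpu",
#   ... )
#   >>> matched, to_ignore, scores, p_cls, t_cls = (torch.cat(x, 0) for x in zip(*matching))
#   >>> ap, precision, recall, f1, classes = compute_detection_metrics(
#   ...     matched, to_ignore, scores, p_cls, t_cls, device="cpu"
#   ... )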