metrics.py
# Model validation metrics
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import torch

from . import general


def fitness(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)
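

# A quick worked example for fitness() (a sketch, not part of the original module;
# the numbers are invented for illustration). Run as a module, e.g.
# `python -m <package>.metrics`, since the relative import above requires a package.
if __name__ == '__main__':
    _x = np.array([[0.70, 0.60, 0.65, 0.45]])  # one row of [P, R, mAP@0.5, mAP@0.5:0.95]
    # only the two mAP terms contribute: 0.1 * 0.65 + 0.9 * 0.45 = 0.47
    print('fitness:', fitness(_x))  # -> [0.47]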


def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:  True positives (nparray, nx1 or nx10).
        conf:  Objectness value from 0-1 (nparray).
        pred_cls:  Predicted object classes (nparray).
        target_cls:  True object classes (nparray).
        plot:  Plot precision-recall curve at mAP@0.5
        save_dir:  Plot save directory
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)
    nc = unique_classes.shape[0]  # number of classes

    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, 1000), []  # for plotting
    ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = (target_cls == c).sum()  # number of labels
        n_p = i.sum()  # number of predictions

        if n_p == 0 or n_l == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_l + 1e-16)  # recall curve
            r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score

            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
                if plot and j == 0:
                    py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5

    # Compute F1 (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)
    if plot:
        plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
        plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
        plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
        plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')

    i = f1.mean(0).argmax()  # max F1 index
    return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves
    # Arguments
        recall:    The recall curve (list)
        precision: The precision curve (list)
    # Returns
        Average precision, precision curve, recall curve
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01]))
    mpre = np.concatenate(([1.], precision, [0.]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap, mpre, mrec
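

# Worked example for compute_ap() (a sketch, not part of the original module;
# the curve is synthetic): a detector that holds precision 1.0 up to 50% recall
# and never recovers should score AP of roughly 0.5 under the 101-point
# interpolation used above.
if __name__ == '__main__':
    _recall = np.array([0.1, 0.3, 0.5])
    _precision = np.array([1.0, 1.0, 1.0])
    _ap, _, _ = compute_ap(_recall, _precision)
    print('AP:', _ap)  # ~0.50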


class ConfusionMatrix:
    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
    def __init__(self, nc, conf=0.25, iou_thres=0.45):
        self.matrix = np.zeros((nc + 1, nc + 1))
        self.nc = nc  # number of classes
        self.conf = conf
        self.iou_thres = iou_thres

    def process_batch(self, detections, labels):
        """
        Update the confusion matrix for one batch of detections and labels.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
            labels (Array[M, 5]), class, x1, y1, x2, y2
        Returns:
            None, updates confusion matrix accordingly
        """
        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = general.box_iou(labels[:, 1:], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(np.int16)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # background FP

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # background FN

    def plot(self, save_dir='', names=()):
        try:
            import seaborn as sn

            array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # normalize
            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

            fig = plt.figure(figsize=(12, 9), tight_layout=True)
            sn.set(font_scale=1.0 if self.nc < 50 else 0.8)  # for label size
            labels = (0 < len(names) < 99) and len(names) == self.nc  # apply names to ticklabels
            sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
                       xticklabels=list(names) + ['background FP'] if labels else "auto",
                       yticklabels=list(names) + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
            fig.axes[0].set_xlabel('True')
            fig.axes[0].set_ylabel('Predicted')
            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
        except Exception:
            pass  # seaborn unavailable or plotting failed; skip the plot silently

    def print(self):
        for i in range(self.nc + 1):
            print(' '.join(map(str, self.matrix[i])))
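

# Usage sketch for ConfusionMatrix (not part of the original module; it relies
# on general.box_iou() from the sibling module, so run it in the package
# context, e.g. `python -m <package>.metrics`). Boxes and classes are invented.
if __name__ == '__main__':
    _cm = ConfusionMatrix(nc=2)
    _dets = torch.tensor([[10., 10., 50., 50., 0.9, 0],   # overlaps the first label exactly
                          [60., 60., 90., 90., 0.8, 1]])  # overlaps the second label exactly
    _labels = torch.tensor([[0., 10., 10., 50., 50.],
                            [1., 60., 60., 90., 90.]])
    _cm.process_batch(_dets, _labels)
    _cm.print()  # 3x3 matrix (2 classes + background); the class diagonal should read 1, 1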


# Plots ----------------------------------------------------------------------------------------------------------------

def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
    # Precision-recall curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)


def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
    # Metric-confidence curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py):
            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)

    y = py.mean(0)
    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)
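

# End-to-end sketch for ap_per_class() (not part of the original module; the
# arrays are synthetic). tp has one column per IoU threshold (here just one);
# passing plot=True would additionally write the PR/F1/P/R curve PNGs to save_dir.
if __name__ == '__main__':
    _tp = np.array([[1.], [1.], [0.], [1.]])  # per-detection hit/miss at one IoU threshold
    _conf = np.array([0.9, 0.8, 0.7, 0.6])    # detection confidences
    _pred_cls = np.array([0, 0, 1, 1])        # predicted classes
    _target_cls = np.array([0, 0, 1, 1])      # ground-truth classes (2 labels per class)
    _p, _r, _ap, _f1, _classes = ap_per_class(_tp, _conf, _pred_cls, _target_cls)
    print('classes:', _classes)
    print('AP per class:', _ap.ravel())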