main.py

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import argparse
from typing import List, Tuple

import cv2
import numpy as np
import onnxruntime as ort
import torch

from ultralytics.utils import ASSETS, YAML
from ultralytics.utils.checks import check_requirements, check_yaml
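
# A minimal environment for the imports above (a sketch; the ultralytics package
# already pulls in torch, numpy, and opencv-python, and exact versions may vary):
#   pip install ultralytics onnxruntime
# torch is only used here to check for CUDA when choosing an ONNX Runtime package.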


class YOLOv8:
    """
    YOLOv8 object detection model class for handling ONNX inference and visualization.

    This class provides functionality to load a YOLOv8 ONNX model, perform inference on images,
    and visualize the detection results with bounding boxes and labels.

    Attributes:
        onnx_model (str): Path to the ONNX model file.
        input_image (str): Path to the input image file.
        confidence_thres (float): Confidence threshold for filtering detections.
        iou_thres (float): IoU threshold for non-maximum suppression.
        classes (List[str]): List of class names from the COCO dataset.
        color_palette (np.ndarray): Random color palette for visualizing different classes.
        input_width (int): Width dimension of the model input.
        input_height (int): Height dimension of the model input.
        img (np.ndarray): The loaded input image.
        img_height (int): Height of the input image.
        img_width (int): Width of the input image.

    Methods:
        letterbox: Resize and reshape images while maintaining aspect ratio by adding padding.
        draw_detections: Draw bounding boxes and labels on the input image based on detected objects.
        preprocess: Preprocess the input image before performing inference.
        postprocess: Perform post-processing on the model's output to extract and visualize detections.
        main: Perform inference using an ONNX model and return the output image with drawn detections.

    Examples:
        Initialize YOLOv8 detector and run inference
        >>> detector = YOLOv8("yolov8n.onnx", "image.jpg", 0.5, 0.5)
        >>> output_image = detector.main()
    """

    def __init__(self, onnx_model: str, input_image: str, confidence_thres: float, iou_thres: float):
        """
        Initialize an instance of the YOLOv8 class.

        Args:
            onnx_model (str): Path to the ONNX model.
            input_image (str): Path to the input image.
            confidence_thres (float): Confidence threshold for filtering detections.
            iou_thres (float): IoU threshold for non-maximum suppression.
        """
        self.onnx_model = onnx_model
        self.input_image = input_image
        self.confidence_thres = confidence_thres
        self.iou_thres = iou_thres

        # Load the class names from the COCO dataset (coco8.yaml reuses the 80 COCO names)
        self.classes = YAML.load(check_yaml("coco8.yaml"))["names"]

        # Generate a random color palette, one color per class
        self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))

    def letterbox(self, img: np.ndarray, new_shape: Tuple[int, int] = (640, 640)) -> Tuple[np.ndarray, Tuple[int, int]]:
        """
        Resize and reshape images while maintaining aspect ratio by adding padding.

        Args:
            img (np.ndarray): Input image to be resized.
            new_shape (Tuple[int, int]): Target shape (height, width) for the image.

        Returns:
            img (np.ndarray): Resized and padded image.
            pad (Tuple[int, int]): Padding values (top, left) applied to the image.
        """
        shape = img.shape[:2]  # current shape [height, width]

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

        # Compute padding
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2  # wh padding

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))

        return img, (top, left)
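
    # Worked example (a sketch, assuming a 1080x810 input such as Ultralytics' bus.jpg
    # letterboxed to the default 640x640):
    #   r = min(640 / 1080, 640 / 810) ≈ 0.5926
    #   new_unpad = (round(810 * r), round(1080 * r)) = (480, 640)   # (width, height)
    #   dw, dh = (640 - 480) / 2, (640 - 640) / 2 = 80.0, 0.0
    # giving 80 px of gray (114, 114, 114) padding on the left and right, none on the
    # top/bottom, and a returned pad of (top, left) = (0, 80).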

    def draw_detections(self, img: np.ndarray, box: List[float], score: float, class_id: int) -> None:
        """Draw bounding boxes and labels on the input image based on the detected objects."""
        # Extract the coordinates of the bounding box
        x1, y1, w, h = box

        # Retrieve the color for the class ID
        color = self.color_palette[class_id]

        # Draw the bounding box on the image
        cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)

        # Create the label text with class name and score
        label = f"{self.classes[class_id]}: {score:.2f}"

        # Calculate the dimensions of the label text
        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)

        # Place the label above the box, or just below its top edge if there is no room above
        label_x = x1
        label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10

        # Draw a filled rectangle as the background for the label text
        cv2.rectangle(
            img, (label_x, label_y - label_height), (label_x + label_width, label_y + label_height), color, cv2.FILLED
        )

        # Draw the label text on the image
        cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)

    def preprocess(self) -> Tuple[np.ndarray, Tuple[int, int]]:
        """
        Preprocess the input image before performing inference.

        This method reads the input image, converts its color space, applies letterboxing to maintain aspect ratio,
        normalizes pixel values, and prepares the image data for model input.

        Returns:
            image_data (np.ndarray): Preprocessed image data ready for inference with shape (1, 3, height, width).
            pad (Tuple[int, int]): Padding values (top, left) applied during letterboxing.
        """
        # Read the input image using OpenCV
        self.img = cv2.imread(self.input_image)

        # Get the height and width of the input image
        self.img_height, self.img_width = self.img.shape[:2]

        # Convert the image color space from BGR to RGB
        img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)

        # letterbox expects a (height, width) target shape
        img, pad = self.letterbox(img, (self.input_height, self.input_width))

        # Normalize the image data by dividing it by 255.0
        image_data = np.array(img) / 255.0

        # Transpose the image to put the channel dimension first (HWC -> CHW)
        image_data = np.transpose(image_data, (2, 0, 1))

        # Add a batch dimension to match the expected input shape
        image_data = np.expand_dims(image_data, axis=0).astype(np.float32)

        return image_data, pad
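
    # After preprocess(), `image_data` for a 640x640 model has shape (1, 3, 640, 640),
    # dtype float32, with values scaled to [0, 1]; this matches the input layout the
    # exported YOLOv8 ONNX graph expects.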

    def postprocess(self, input_image: np.ndarray, output: List[np.ndarray], pad: Tuple[int, int]) -> np.ndarray:
        """
        Perform post-processing on the model's output to extract and visualize detections.

        This method processes the raw model output to extract bounding boxes, scores, and class IDs.
        It applies non-maximum suppression to filter overlapping detections and draws the results on the input image.

        Args:
            input_image (np.ndarray): The input image.
            output (List[np.ndarray]): The output arrays from the model.
            pad (Tuple[int, int]): Padding values (top, left) used during letterboxing.

        Returns:
            (np.ndarray): The input image with detections drawn on it.
        """
        # Transpose and squeeze the output to match the expected shape
        outputs = np.transpose(np.squeeze(output[0]))

        # Get the number of rows in the outputs array
        rows = outputs.shape[0]

        # Lists to store the bounding boxes, scores, and class IDs of the detections
        boxes = []
        scores = []
        class_ids = []

        # Calculate the scaling factor for the bounding box coordinates
        gain = min(self.input_height / self.img_height, self.input_width / self.img_width)

        # Undo the letterbox padding; pad is (top, left)
        outputs[:, 0] -= pad[1]
        outputs[:, 1] -= pad[0]

        # Iterate over each row in the outputs array
        for i in range(rows):
            # Extract the class scores from the current row
            classes_scores = outputs[i][4:]

            # Find the maximum score among the class scores
            max_score = np.amax(classes_scores)

            # If the maximum score is above the confidence threshold
            if max_score >= self.confidence_thres:
                # Get the class ID with the highest score
                class_id = np.argmax(classes_scores)

                # Extract the bounding box coordinates (center x, center y, width, height)
                x, y, w, h = outputs[i][0], outputs[i][1], outputs[i][2], outputs[i][3]

                # Scale the box back to the original image and convert to top-left corner format
                left = int((x - w / 2) / gain)
                top = int((y - h / 2) / gain)
                width = int(w / gain)
                height = int(h / gain)

                # Add the class ID, score, and box coordinates to the respective lists
                class_ids.append(class_id)
                scores.append(max_score)
                boxes.append([left, top, width, height])

        # Apply non-maximum suppression to filter out overlapping bounding boxes
        indices = cv2.dnn.NMSBoxes(boxes, scores, self.confidence_thres, self.iou_thres)

        # Draw the detections that survived non-maximum suppression
        for i in indices:
            box = boxes[i]
            score = scores[i]
            class_id = class_ids[i]
            self.draw_detections(input_image, box, score, class_id)

        return input_image
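
    # Note on the raw output layout: a YOLOv8 detection model exported for the 80 COCO
    # classes emits a (1, 84, N) tensor, where each candidate carries 4 box values
    # (center x, center y, width, height) plus 80 class scores, and N is 8400 for the
    # stock 640x640 export. The squeeze/transpose above reshapes it to (N, 84) so that
    # each row is one candidate detection.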

    def main(self) -> np.ndarray:
        """
        Perform inference using an ONNX model and return the output image with drawn detections.

        Returns:
            (np.ndarray): The output image with drawn detections.
        """
        # Create an inference session using the ONNX model and specify execution providers
        session = ort.InferenceSession(self.onnx_model, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])

        # Get the model inputs
        model_inputs = session.get_inputs()

        # Store the input shape for later use; the ONNX input layout is (batch, channels, height, width)
        input_shape = model_inputs[0].shape
        self.input_height = input_shape[2]
        self.input_width = input_shape[3]

        # Preprocess the image data
        img_data, pad = self.preprocess()

        # Run inference using the preprocessed image data
        outputs = session.run(None, {model_inputs[0].name: img_data})

        # Perform post-processing on the outputs to obtain the output image
        return self.postprocess(self.img, outputs, pad)
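
    # Note: onnxruntime falls back to CPUExecutionProvider (logging a warning rather
    # than raising) when the CUDA provider is unavailable, so the provider list used
    # in main() also works on CPU-only machines.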


if __name__ == "__main__":
    # Create an argument parser to handle command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default="yolov8n.onnx", help="Path to the ONNX model.")
    parser.add_argument("--img", type=str, default=str(ASSETS / "bus.jpg"), help="Path to the input image.")
    parser.add_argument("--conf-thres", type=float, default=0.5, help="Confidence threshold.")
    parser.add_argument("--iou-thres", type=float, default=0.5, help="NMS IoU threshold.")
    args = parser.parse_args()

    # Check the requirements and select the appropriate backend (CPU or GPU)
    check_requirements("onnxruntime-gpu" if torch.cuda.is_available() else "onnxruntime")

    # Create an instance of the YOLOv8 class with the specified arguments
    detection = YOLOv8(args.model, args.img, args.conf_thres, args.iou_thres)

    # Perform object detection and obtain the output image
    output_image = detection.main()

    # Display the output image in a window and wait for a key press to exit
    cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
    cv2.imshow("Output", output_image)
    cv2.waitKey(0)
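
# Example usage (a sketch; assumes the model has already been exported to ONNX, e.g.
# with the Ultralytics CLI: `yolo export model=yolov8n.pt format=onnx`):
#   python main.py --model yolov8n.onnx --img bus.jpg --conf-thres 0.5 --iou-thres 0.5
# Press any key in the "Output" window to exit.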