#18534 Create .dockerignore

Merged
Glenn Jocher merged 1 commit into Ultralytics:main from ultralytics:dockerignore
# Ultralytics YOLO 🚀, AGPL-3.0 license
import argparse

import cv2
import numpy as np
import onnxruntime as ort
import torch

from ultralytics.utils import ASSETS, yaml_load
from ultralytics.utils.checks import check_requirements, check_yaml


class YOLOv8:
    """YOLOv8 object detection model class for handling inference and visualization."""

    def __init__(self, onnx_model, input_image, confidence_thres, iou_thres):
        """
        Initializes an instance of the YOLOv8 class.

        Args:
            onnx_model: Path to the ONNX model.
            input_image: Path to the input image.
            confidence_thres: Confidence threshold for filtering detections.
            iou_thres: IoU (Intersection over Union) threshold for non-maximum suppression.
        """
        self.onnx_model = onnx_model
        self.input_image = input_image
        self.confidence_thres = confidence_thres
        self.iou_thres = iou_thres

        # Load the class names from the COCO dataset
        self.classes = yaml_load(check_yaml("coco8.yaml"))["names"]

        # Generate a color palette for the classes
        self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))

    def draw_detections(self, img, box, score, class_id):
        """
        Draws bounding boxes and labels on the input image based on the detected objects.

        Args:
            img: The input image to draw detections on.
            box: Detected bounding box.
            score: Corresponding detection score.
            class_id: Class ID for the detected object.

        Returns:
            None
        """
        # Extract the coordinates of the bounding box
        x1, y1, w, h = box

        # Retrieve the color for the class ID
        color = self.color_palette[class_id]

        # Draw the bounding box on the image
        cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)

        # Create the label text with class name and score
        label = f"{self.classes[class_id]}: {score:.2f}"

        # Calculate the dimensions of the label text
        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)

        # Calculate the position of the label text
        label_x = x1
        label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10

        # Draw a filled rectangle as the background for the label text
        cv2.rectangle(
            img, (label_x, label_y - label_height), (label_x + label_width, label_y + label_height), color, cv2.FILLED
        )

        # Draw the label text on the image
        cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)

    def preprocess(self):
        """
        Preprocesses the input image before performing inference.

        Returns:
            image_data: Preprocessed image data ready for inference.
        """
        # Read the input image using OpenCV
        self.img = cv2.imread(self.input_image)

        # Get the height and width of the input image
        self.img_height, self.img_width = self.img.shape[:2]

        # Convert the image color space from BGR to RGB
        img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)

        # Resize the image to match the input shape
        img = cv2.resize(img, (self.input_width, self.input_height))
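        # Note: this is a plain resize with no letterbox padding, so the image is
        # stretched to the model's input size; the x/y scale factors applied in
        # postprocess() map the predicted boxes back to the original frame.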
        # Normalize the image data by dividing it by 255.0
        image_data = np.array(img) / 255.0

        # Transpose the image to have the channel dimension as the first dimension
        image_data = np.transpose(image_data, (2, 0, 1))  # Channel first

        # Expand the dimensions of the image data to match the expected input shape
        image_data = np.expand_dims(image_data, axis=0).astype(np.float32)

        # Return the preprocessed image data
        return image_data

    def postprocess(self, input_image, output):
        """
        Performs post-processing on the model's output to extract bounding boxes, scores, and class IDs.

        Args:
            input_image (numpy.ndarray): The input image.
            output (numpy.ndarray): The output of the model.

        Returns:
            numpy.ndarray: The input image with detections drawn on it.
        """
        # Transpose and squeeze the output to match the expected shape
        outputs = np.transpose(np.squeeze(output[0]))
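        # For a COCO-trained YOLOv8 model at 640x640, the raw output has shape
        # (1, 84, 8400): 4 box values (cx, cy, w, h) plus 80 class scores for each
        # of 8400 candidate predictions; after squeeze/transpose, each row of the
        # resulting (8400, 84) array is one candidate detection.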
        # Get the number of rows in the outputs array
        rows = outputs.shape[0]

        # Lists to store the bounding boxes, scores, and class IDs of the detections
        boxes = []
        scores = []
        class_ids = []

        # Calculate the scaling factors for the bounding box coordinates
        x_factor = self.img_width / self.input_width
        y_factor = self.img_height / self.input_height

        # Iterate over each row in the outputs array
        for i in range(rows):
            # Extract the class scores from the current row
            classes_scores = outputs[i][4:]

            # Find the maximum score among the class scores
            max_score = np.amax(classes_scores)

            # If the maximum score is above the confidence threshold
            if max_score >= self.confidence_thres:
                # Get the class ID with the highest score
                class_id = np.argmax(classes_scores)

                # Extract the bounding box coordinates from the current row
                x, y, w, h = outputs[i][0], outputs[i][1], outputs[i][2], outputs[i][3]

                # Calculate the scaled coordinates of the bounding box
                left = int((x - w / 2) * x_factor)
                top = int((y - h / 2) * y_factor)
                width = int(w * x_factor)
                height = int(h * y_factor)

                # Add the class ID, score, and box coordinates to the respective lists
                class_ids.append(class_id)
                scores.append(max_score)
                boxes.append([left, top, width, height])

        # Apply non-maximum suppression to filter out overlapping bounding boxes
        indices = cv2.dnn.NMSBoxes(boxes, scores, self.confidence_thres, self.iou_thres)

        # Iterate over the selected indices after non-maximum suppression
        for i in indices:
            # Get the box, score, and class ID corresponding to the index
            box = boxes[i]
            score = scores[i]
            class_id = class_ids[i]

            # Draw the detection on the input image
            self.draw_detections(input_image, box, score, class_id)

        # Return the modified input image
        return input_image

    def main(self):
        """
        Performs inference using an ONNX model and returns the output image with drawn detections.

        Returns:
            output_img: The output image with drawn detections.
        """
        # Create an inference session using the ONNX model and specify execution providers
        session = ort.InferenceSession(self.onnx_model, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
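        # ONNX Runtime tries the providers in order, so it falls back to
        # CPUExecutionProvider automatically if the CUDA provider is unavailable.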
        # Get the model inputs
        model_inputs = session.get_inputs()

        # Store the shape of the input for later use
        input_shape = model_inputs[0].shape
        # The input tensor is NCHW, so index 2 is the height and index 3 is the width
        self.input_width = input_shape[3]
        self.input_height = input_shape[2]
        # Preprocess the image data
        img_data = self.preprocess()

        # Run inference using the preprocessed image data
        outputs = session.run(None, {model_inputs[0].name: img_data})

        # Perform post-processing on the outputs to obtain the output image
        return self.postprocess(self.img, outputs)  # output image


if __name__ == "__main__":
    # Create an argument parser to handle command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default="yolov8n.onnx", help="Input your ONNX model.")
    parser.add_argument("--img", type=str, default=str(ASSETS / "bus.jpg"), help="Path to input image.")
    parser.add_argument("--conf-thres", type=float, default=0.5, help="Confidence threshold")
    parser.add_argument("--iou-thres", type=float, default=0.5, help="NMS IoU threshold")
    args = parser.parse_args()

    # Check the requirements and select the appropriate backend (CPU or GPU)
    check_requirements("onnxruntime-gpu" if torch.cuda.is_available() else "onnxruntime")

    # Create an instance of the YOLOv8 class with the specified arguments
    detection = YOLOv8(args.model, args.img, args.conf_thres, args.iou_thres)

    # Perform object detection and obtain the output image
    output_image = detection.main()

    # Display the output image in a window
    cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
    cv2.imshow("Output", output_image)

    # Wait for a key press to exit
    cv2.waitKey(0)
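For reference, a minimal sketch of how this example can be exercised end to end. It assumes the ultralytics package is installed, a COCO-pretrained yolov8n.pt checkpoint is reachable, and the script above is saved as main.py (the filename is illustrative); the export call is the standard Ultralytics API.

# Export a YOLOv8 checkpoint to ONNX; this writes yolov8n.onnx next to the .pt file
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # downloads the checkpoint if it is not already present
model.export(format="onnx")

# The detector can then be run from the command line, e.g.:
#   python main.py --model yolov8n.onnx --img bus.jpg --conf-thres 0.5 --iou-thres 0.5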