interactive_tracker.py

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import time
from typing import Tuple

import cv2

from ultralytics import YOLO
from ultralytics.utils import LOGGER
from ultralytics.utils.plotting import Annotator, colors
enable_gpu = False  # Set True if running with CUDA
model_file = "yolo11s.pt"  # Path to model file
show_fps = True  # If True, shows current FPS in top-left corner
show_conf = False  # Display or hide the confidence score
save_video = True  # Set True to save output video
video_output_path = "interactive_tracker_output.avi"  # Output video file name

conf = 0.3  # Min confidence for object detection (lower = more detections, possibly more false positives)
iou = 0.3  # IoU threshold for NMS (higher = less overlap allowed)
max_det = 20  # Maximum objects per image (increase for crowded scenes)

tracker = "bytetrack.yaml"  # Tracker config: 'bytetrack.yaml', 'botsort.yaml', etc.
track_args = {
    "persist": True,  # Keep frame history as a stream for continuous tracking
    "verbose": False,  # Print debug info from tracker
}

window_name = "Ultralytics YOLO Interactive Tracking"  # Output window name
LOGGER.info("🚀 Initializing model...")
if enable_gpu:
    LOGGER.info("Using GPU...")
    model = YOLO(model_file)
    model.to("cuda")
else:
    LOGGER.info("Using CPU...")
    model = YOLO(model_file, task="detect")

classes = model.names  # Store model class names

cap = cv2.VideoCapture(0)  # Replace with a video file path if needed

# Initialize video writer
vw = None
if save_video:
    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
    fps = fps if fps > 0 else 30  # Some webcams report 0 FPS; fall back to a sane default
    vw = cv2.VideoWriter(video_output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

selected_object_id = None  # Track ID of the clicked object, if any
selected_bbox = None
selected_center = None
def get_center(x1: int, y1: int, x2: int, y2: int) -> Tuple[int, int]:
    """
    Calculate the center point of a bounding box.

    Args:
        x1 (int): Top-left X coordinate.
        y1 (int): Top-left Y coordinate.
        x2 (int): Bottom-right X coordinate.
        y2 (int): Bottom-right Y coordinate.

    Returns:
        center_x (int): X-coordinate of the center point.
        center_y (int): Y-coordinate of the center point.
    """
    return (x1 + x2) // 2, (y1 + y2) // 2
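
# For example (hypothetical coordinates): get_center(0, 0, 100, 50) returns (50, 25),
# the integer midpoint of the box.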

def extend_line_from_edge(mid_x: int, mid_y: int, direction: str, img_shape: Tuple[int, int, int]) -> Tuple[int, int]:
    """
    Calculate the endpoint to extend a line from the center toward an image edge.

    Args:
        mid_x (int): X-coordinate of the midpoint.
        mid_y (int): Y-coordinate of the midpoint.
        direction (str): Direction to extend ('left', 'right', 'up', 'down').
        img_shape (Tuple[int, int, int]): Image shape in (height, width, channels).

    Returns:
        end_x (int): X-coordinate of the endpoint.
        end_y (int): Y-coordinate of the endpoint.
    """
    h, w = img_shape[:2]
    if direction == "left":
        return 0, mid_y
    if direction == "right":
        return w - 1, mid_y
    if direction == "up":
        return mid_x, 0
    if direction == "down":
        return mid_x, h - 1
    return mid_x, mid_y  # Unknown direction: stay at the midpoint
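
# For example (hypothetical values): on a 640x480 frame (shape (480, 640, 3)),
# extend_line_from_edge(320, 240, "left", frame.shape) returns (0, 240) and the
# same call with "right" returns (639, 240).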

def draw_tracking_scope(im, bbox: tuple, color: tuple) -> None:
    """
    Draw tracking scope lines extending from the bounding box to image edges.

    Args:
        im (np.ndarray): Image array to draw on.
        bbox (tuple): Bounding box coordinates (x1, y1, x2, y2).
        color (tuple): Color in BGR format for drawing.
    """
    x1, y1, x2, y2 = bbox
    mid_top = ((x1 + x2) // 2, y1)
    mid_bottom = ((x1 + x2) // 2, y2)
    mid_left = (x1, (y1 + y2) // 2)
    mid_right = (x2, (y1 + y2) // 2)
    cv2.line(im, mid_top, extend_line_from_edge(*mid_top, "up", im.shape), color, 2)
    cv2.line(im, mid_bottom, extend_line_from_edge(*mid_bottom, "down", im.shape), color, 2)
    cv2.line(im, mid_left, extend_line_from_edge(*mid_left, "left", im.shape), color, 2)
    cv2.line(im, mid_right, extend_line_from_edge(*mid_right, "right", im.shape), color, 2)
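
# The visual effect is a full-frame crosshair: four lines running from the midpoints
# of the selected box's sides out to the image edges, highlighting the tracked object.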

def click_event(event: int, x: int, y: int, flags: int, param) -> None:
    """
    Handle mouse click events to select an object for focused tracking.

    Args:
        event (int): OpenCV mouse event type.
        x (int): X-coordinate of the mouse event.
        y (int): Y-coordinate of the mouse event.
        flags (int): Any relevant flags passed by OpenCV.
        param (Any): Additional parameters (not used).
    """
    global selected_object_id
    if event == cv2.EVENT_LBUTTONDOWN and results is not None:
        # Each row of boxes.data is (x1, y1, x2, y2, track_id, conf, class_id) when a
        # track ID is assigned, or (x1, y1, x2, y2, conf, class_id) without one
        detections = results[0].boxes.data if results[0].boxes is not None else []
        min_area = float("inf")
        best_match = None
        for track in detections:
            track = track.tolist()
            if len(track) >= 6:
                x1, y1, x2, y2 = map(int, track[:4])
                if x1 <= x <= x2 and y1 <= y <= y2:
                    # Prefer the smallest box under the cursor so overlapping
                    # objects resolve to the innermost one
                    area = (x2 - x1) * (y2 - y1)
                    if area < min_area:
                        class_id = int(track[-1])
                        track_id = int(track[4]) if len(track) == 7 else -1
                        min_area = area
                        best_match = (track_id, model.names[class_id])
        if best_match:
            selected_object_id, label = best_match
            print(f"🔵 TRACKING STARTED: {label} (ID {selected_object_id})")

results = None  # Populated in the main loop; lets click_event ignore clicks before the first frame
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, click_event)

fps_counter, fps_timer, fps_display = 0, time.time(), 0
while cap.isOpened():
    success, im = cap.read()
    if not success:
        break

    results = model.track(im, conf=conf, iou=iou, max_det=max_det, tracker=tracker, **track_args)
    annotator = Annotator(im)

    detections = results[0].boxes.data if results[0].boxes is not None else []
    detected_objects = []
    for track in detections:
        track = track.tolist()
        if len(track) < 6:
            continue
        x1, y1, x2, y2 = map(int, track[:4])
        class_id = int(track[6]) if len(track) >= 7 else int(track[5])
        track_id = int(track[4]) if len(track) == 7 else -1
        color = colors(track_id, True)
        txt_color = annotator.get_txt_color(color)
        label = f"{classes[class_id]} ID {track_id}" + (f" ({float(track[5]):.2f})" if show_conf else "")
        detected_objects.append(label)  # Collect labels for the per-frame terminal log
        if track_id == selected_object_id:
            draw_tracking_scope(im, (x1, y1, x2, y2), color)
            center = get_center(x1, y1, x2, y2)
            cv2.circle(im, center, 6, color, -1)

            # Pulsing circle for attention
            pulse_radius = 8 + int(4 * abs(time.time() % 1 - 0.5))
            cv2.circle(im, center, pulse_radius, color, 2)

            annotator.box_label([x1, y1, x2, y2], label=f"ACTIVE: TRACK {track_id}", color=color)
        else:
            # Draw dashed box for other objects
            for i in range(x1, x2, 10):
                cv2.line(im, (i, y1), (i + 5, y1), color, 3)
                cv2.line(im, (i, y2), (i + 5, y2), color, 3)
            for i in range(y1, y2, 10):
                cv2.line(im, (x1, i), (x1, i + 5), color, 3)
                cv2.line(im, (x2, i), (x2, i + 5), color, 3)
            # Draw label text with background
            (tw, th), bl = cv2.getTextSize(label, 0, 0.7, 2)
            cv2.rectangle(im, (x1 + 5 - 5, y1 + 20 - th - 5), (x1 + 5 + tw + 5, y1 + 20 + bl), color, -1)
            cv2.putText(im, label, (x1 + 5, y1 + 20), 0, 0.7, txt_color, 1, cv2.LINE_AA)

    if show_fps:
        fps_counter += 1
        if time.time() - fps_timer >= 1.0:
            fps_display = fps_counter
            fps_counter = 0
            fps_timer = time.time()

        # Draw FPS text with background (background rectangle first, then text on top)
        fps_text = f"FPS: {fps_display}"
        (tw, th), bl = cv2.getTextSize(fps_text, 0, 0.7, 2)
        cv2.rectangle(im, (10 - 5, 25 - th - 5), (10 + tw + 5, 25 + bl), (255, 255, 255), -1)
        cv2.putText(im, fps_text, (10, 25), 0, 0.7, (104, 31, 17), 1, cv2.LINE_AA)

    cv2.imshow(window_name, im)
    if save_video and vw is not None:
        vw.write(im)

    # Terminal logging
    LOGGER.info(f"🟡 DETECTED {len(detections)} OBJECT(S): {' | '.join(detected_objects)}")

    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):  # Quit
        break
    elif key == ord("c"):  # Clear the current selection
        LOGGER.info("🟢 TRACKING RESET")
        selected_object_id = None

cap.release()
if save_video and vw is not None:
    vw.release()
cv2.destroyAllWindows()
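
A quick way to try it: with the ultralytics package installed (pip install ultralytics, which also pulls in OpenCV), running python interactive_tracker.py should open the default webcam in a window. Left-click any detected box to lock tracking onto that object, press c to clear the selection, and press q to quit. To process a recording instead of the webcam, point cv2.VideoCapture at a video file path.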