action_recognition.py

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import argparse
import time
from collections import defaultdict
from typing import List, Optional, Tuple
from urllib.parse import urlparse

import cv2
import numpy as np
import torch
from transformers import AutoModel, AutoProcessor

from ultralytics import YOLO
from ultralytics.data.loaders import get_best_youtube_url
from ultralytics.utils.plotting import Annotator
from ultralytics.utils.torch_utils import select_device


class TorchVisionVideoClassifier:
    """
    Video classifier using pretrained TorchVision models for action recognition.

    This class provides an interface for video classification using various pretrained models from TorchVision's
    video model collection, supporting models like S3D, R3D, Swin3D, and MViT architectures.

    Attributes:
        model (torch.nn.Module): The loaded TorchVision model for video classification.
        weights (torchvision.models.video.Weights): The weights used for the model.
        device (torch.device): The device on which the model is loaded.

    Methods:
        available_model_names: Returns a list of available model names.
        preprocess_crops_for_video_cls: Preprocesses crops for video classification.
        __call__: Performs inference on the given sequences.
        postprocess: Postprocesses the model's output.

    Examples:
        >>> classifier = TorchVisionVideoClassifier("s3d", device="cpu")
        >>> crops = [np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
        >>> tensor = classifier.preprocess_crops_for_video_cls(crops)
        >>> outputs = classifier(tensor)
        >>> labels, confidences = classifier.postprocess(outputs)

    References:
        https://pytorch.org/vision/stable/
    """

    from torchvision.models.video import (
        MViT_V1_B_Weights,
        MViT_V2_S_Weights,
        R3D_18_Weights,
        S3D_Weights,
        Swin3D_B_Weights,
        Swin3D_T_Weights,
        mvit_v1_b,
        mvit_v2_s,
        r3d_18,
        s3d,
        swin3d_b,
        swin3d_t,
    )

    model_name_to_model_and_weights = {
        "s3d": (s3d, S3D_Weights.DEFAULT),
        "r3d_18": (r3d_18, R3D_18_Weights.DEFAULT),
        "swin3d_t": (swin3d_t, Swin3D_T_Weights.DEFAULT),
        "swin3d_b": (swin3d_b, Swin3D_B_Weights.DEFAULT),
        "mvit_v1_b": (mvit_v1_b, MViT_V1_B_Weights.DEFAULT),
        "mvit_v2_s": (mvit_v2_s, MViT_V2_S_Weights.DEFAULT),
    }

    def __init__(self, model_name: str, device: str | torch.device = ""):
        """
        Initialize the VideoClassifier with the specified model name and device.

        Args:
            model_name (str): The name of the model to use. Must be one of the available models.
            device (str | torch.device): The device to run the model on.
        """
        if model_name not in self.model_name_to_model_and_weights:
            raise ValueError(f"Invalid model name '{model_name}'. Available models: {self.available_model_names()}")
        model, self.weights = self.model_name_to_model_and_weights[model_name]
        self.device = select_device(device)
        self.model = model(weights=self.weights).to(self.device).eval()

    @staticmethod
    def available_model_names() -> List[str]:
        """
        Get the list of available model names.

        Returns:
            (List[str]): List of available model names that can be used with this classifier.
        """
        return list(TorchVisionVideoClassifier.model_name_to_model_and_weights.keys())

    def preprocess_crops_for_video_cls(self, crops: List[np.ndarray], input_size: List[int] = None) -> torch.Tensor:
        """
        Preprocess a list of crops for video classification.

        Args:
            crops (List[np.ndarray]): List of crops to preprocess. Each crop should have dimensions (H, W, C).
            input_size (List[int], optional): The target input size for the model.

        Returns:
            (torch.Tensor): Preprocessed crops as a tensor with dimensions (1, T, C, H, W).
        """
        if input_size is None:
            input_size = [224, 224]
        from torchvision.transforms import v2

        transform = v2.Compose(
            [
                v2.ToDtype(torch.float32, scale=True),
                v2.Resize(input_size, antialias=True),
                v2.Normalize(mean=self.weights.transforms().mean, std=self.weights.transforms().std),
            ]
        )
        processed_crops = [transform(torch.from_numpy(crop).permute(2, 0, 1)) for crop in crops]
        return torch.stack(processed_crops).unsqueeze(0).permute(0, 2, 1, 3, 4).to(self.device)

    def __call__(self, sequences: torch.Tensor) -> torch.Tensor:
        """
        Perform inference on the given sequences.

        Args:
            sequences (torch.Tensor): The input sequences for the model with dimensions (B, T, C, H, W) for batched
                video frames or (T, C, H, W) for single video frames.

        Returns:
            (torch.Tensor): The model's output logits.
        """
        with torch.inference_mode():
            return self.model(sequences)

    def postprocess(self, outputs: torch.Tensor) -> Tuple[List[str], List[float]]:
        """
        Postprocess the model's batch output.

        Args:
            outputs (torch.Tensor): The model's output logits.

        Returns:
            pred_labels (List[str]): The predicted labels.
            pred_confs (List[float]): The predicted confidences.
        """
        pred_labels = []
        pred_confs = []
        for output in outputs:
            pred_class = output.argmax(0).item()
            pred_label = self.weights.meta["categories"][pred_class]
            pred_labels.append(pred_label)
            pred_conf = output.softmax(0)[pred_class].item()
            pred_confs.append(pred_conf)
        return pred_labels, pred_confs


class HuggingFaceVideoClassifier:
    """
    Zero-shot video classifier using Hugging Face transformer models.

    This class provides an interface for zero-shot video classification using Hugging Face models, supporting
    custom label sets and various transformer architectures for video understanding.

    Attributes:
        fp16 (bool): Whether to use FP16 for inference.
        labels (List[str]): List of labels for zero-shot classification.
        device (torch.device): The device on which the model is loaded.
        processor (transformers.AutoProcessor): The processor for the model.
        model (transformers.AutoModel): The loaded Hugging Face model.

    Methods:
        preprocess_crops_for_video_cls: Preprocesses crops for video classification.
        __call__: Performs inference on the given sequences.
        postprocess: Postprocesses the model's output.

    Examples:
        >>> labels = ["walking", "running", "dancing"]
        >>> classifier = HuggingFaceVideoClassifier(labels, device="cpu")
        >>> crops = [np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
        >>> tensor = classifier.preprocess_crops_for_video_cls(crops)
        >>> outputs = classifier(tensor)
        >>> labels, confidences = classifier.postprocess(outputs)
    """

    def __init__(
        self,
        labels: List[str],
        model_name: str = "microsoft/xclip-base-patch16-zero-shot",
        device: str | torch.device = "",
        fp16: bool = False,
    ):
        """
        Initialize the HuggingFaceVideoClassifier with the specified model name.

        Args:
            labels (List[str]): List of labels for zero-shot classification.
            model_name (str): The name of the model to use.
            device (str | torch.device): The device to run the model on.
            fp16 (bool): Whether to use FP16 for inference.
        """
        self.fp16 = fp16
        self.labels = labels
        self.device = select_device(device)
        self.processor = AutoProcessor.from_pretrained(model_name)
        model = AutoModel.from_pretrained(model_name).to(self.device)
        if fp16:
            model = model.half()
        self.model = model.eval()

    def preprocess_crops_for_video_cls(self, crops: List[np.ndarray], input_size: List[int] = None) -> torch.Tensor:
        """
        Preprocess a list of crops for video classification.

        Args:
            crops (List[np.ndarray]): List of crops to preprocess. Each crop should have dimensions (H, W, C).
            input_size (List[int], optional): The target input size for the model.

        Returns:
            (torch.Tensor): Preprocessed crops as a tensor with dimensions (1, T, C, H, W).
        """
        if input_size is None:
            input_size = [224, 224]
        from torchvision import transforms

        transform = transforms.Compose(
            [
                transforms.Lambda(lambda x: x.float() / 255.0),
                transforms.Resize(input_size),
                transforms.Normalize(
                    mean=self.processor.image_processor.image_mean, std=self.processor.image_processor.image_std
                ),
            ]
        )
        processed_crops = [transform(torch.from_numpy(crop).permute(2, 0, 1)) for crop in crops]  # (T, C, H, W)
        output = torch.stack(processed_crops).unsqueeze(0).to(self.device)  # (1, T, C, H, W)
        if self.fp16:
            output = output.half()
        return output

    def __call__(self, sequences: torch.Tensor) -> torch.Tensor:
        """
        Perform inference on the given sequences.

        Args:
            sequences (torch.Tensor): The input sequences for the model. Batched video frames with shape (B, T, H, W, C).

        Returns:
            (torch.Tensor): The model's output logits.
        """
        input_ids = self.processor(text=self.labels, return_tensors="pt", padding=True)["input_ids"].to(self.device)
        inputs = {"pixel_values": sequences, "input_ids": input_ids}
        with torch.inference_mode():
            outputs = self.model(**inputs)
        return outputs.logits_per_video

    def postprocess(self, outputs: torch.Tensor) -> Tuple[List[List[str]], List[List[float]]]:
        """
        Postprocess the model's batch output.

        Args:
            outputs (torch.Tensor): The model's output logits.

        Returns:
            pred_labels (List[List[str]]): The predicted top2 labels for each sample.
            pred_confs (List[List[float]]): The predicted top2 confidences for each sample.
        """
        pred_labels = []
        pred_confs = []

        with torch.no_grad():
            logits_per_video = outputs  # Assuming outputs is already the logits tensor
            probs = logits_per_video.softmax(dim=-1)  # Use softmax to convert logits to probabilities

        for prob in probs:
            top2_indices = prob.topk(2).indices.tolist()
            top2_labels = [self.labels[idx] for idx in top2_indices]
            top2_confs = prob[top2_indices].tolist()
            pred_labels.append(top2_labels)
            pred_confs.append(top2_confs)

        return pred_labels, pred_confs


def crop_and_pad(frame: np.ndarray, box: List[float], margin_percent: int) -> np.ndarray:
    """
    Crop box with margin and take square crop from frame.

    Args:
        frame (np.ndarray): The input frame to crop from.
        box (List[float]): The bounding box coordinates [x1, y1, x2, y2].
        margin_percent (int): The percentage of margin to add around the box.

    Returns:
        (np.ndarray): The cropped and resized square image.
    """
    x1, y1, x2, y2 = map(int, box)
    w, h = x2 - x1, y2 - y1

    # Add margin
    margin_x, margin_y = int(w * margin_percent / 100), int(h * margin_percent / 100)
    x1, y1 = max(0, x1 - margin_x), max(0, y1 - margin_y)
    x2, y2 = min(frame.shape[1], x2 + margin_x), min(frame.shape[0], y2 + margin_y)

    # Take square crop from frame
    size = max(y2 - y1, x2 - x1)
    center_y, center_x = (y1 + y2) // 2, (x1 + x2) // 2
    half_size = size // 2
    square_crop = frame[
        max(0, center_y - half_size) : min(frame.shape[0], center_y + half_size),
        max(0, center_x - half_size) : min(frame.shape[1], center_x + half_size),
    ]

    return cv2.resize(square_crop, (224, 224), interpolation=cv2.INTER_LINEAR)
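

# Usage sketch for crop_and_pad (illustrative values only, not part of the original script): given a
# BGR frame and a tracker box [x1, y1, x2, y2], it returns a 224x224 square crop centered on the box.
#   person_crop = crop_and_pad(frame, [120.0, 60.0, 320.0, 460.0], margin_percent=10)  # -> (224, 224, 3)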


def run(
    weights: str = "yolo11n.pt",
    device: str = "",
    source: str = "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
    output_path: Optional[str] = None,
    crop_margin_percentage: int = 10,
    num_video_sequence_samples: int = 8,
    skip_frame: int = 2,
    video_cls_overlap_ratio: float = 0.25,
    fp16: bool = False,
    video_classifier_model: str = "microsoft/xclip-base-patch32",
    labels: List[str] = None,
) -> None:
    """
    Run action recognition on a video source using YOLO for object detection and a video classifier.

    Args:
        weights (str): Path to the YOLO model weights.
        device (str): Device to run the model on. Use 'cuda' for NVIDIA GPU, 'mps' for Apple Silicon, or 'cpu'.
        source (str): Path to mp4 video file or YouTube URL.
        output_path (str, optional): Path to save the output video.
        crop_margin_percentage (int): Percentage of margin to add around detected objects.
        num_video_sequence_samples (int): Number of video frames to use for classification.
        skip_frame (int): Number of frames to skip between detections.
        video_cls_overlap_ratio (float): Overlap ratio between video sequences.
        fp16 (bool): Whether to use half-precision floating point.
        video_classifier_model (str): Name or path of the video classifier model.
        labels (List[str], optional): List of labels for zero-shot classification.
    """
    if labels is None:
        labels = [
            "walking",
            "running",
            "brushing teeth",
            "looking into phone",
            "weight lifting",
            "cooking",
            "sitting",
        ]

    # Initialize models and device
    device = select_device(device)
    yolo_model = YOLO(weights).to(device)
    if video_classifier_model in TorchVisionVideoClassifier.available_model_names():
        print("'fp16' is not supported for TorchVisionVideoClassifier. Setting fp16 to False.")
        print(
            "'labels' is not used for TorchVisionVideoClassifier. Ignoring the provided labels and using Kinetics-400 labels."
        )
        video_classifier = TorchVisionVideoClassifier(video_classifier_model, device=device)
    else:
        video_classifier = HuggingFaceVideoClassifier(
            labels, model_name=video_classifier_model, device=device, fp16=fp16
        )

    # Initialize video capture
    if source.startswith("http") and urlparse(source).hostname in {"www.youtube.com", "youtube.com", "youtu.be"}:
        source = get_best_youtube_url(source)
    elif not source.endswith(".mp4"):
        raise ValueError("Invalid source. Supported sources are YouTube URLs and MP4 files.")
    cap = cv2.VideoCapture(source)

    # Get video properties
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Initialize VideoWriter
    if output_path is not None:
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

    # Initialize track history
    track_history = defaultdict(list)
    frame_counter = 0

    track_ids_to_infer = []
    crops_to_infer = []
    pred_labels = []
    pred_confs = []

    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break

        frame_counter += 1

        # Run YOLO tracking
        results = yolo_model.track(frame, persist=True, classes=[0])  # Track only person class

        if results[0].boxes.is_track:
            boxes = results[0].boxes.xyxy.cpu().numpy()
            track_ids = results[0].boxes.id.cpu().numpy()

            # Visualize prediction
            annotator = Annotator(frame, line_width=3, font_size=10, pil=False)

            if frame_counter % skip_frame == 0:
                crops_to_infer = []
                track_ids_to_infer = []

            for box, track_id in zip(boxes, track_ids):
                if frame_counter % skip_frame == 0:
                    crop = crop_and_pad(frame, box, crop_margin_percentage)
                    track_history[track_id].append(crop)

                if len(track_history[track_id]) > num_video_sequence_samples:
                    track_history[track_id].pop(0)

                if len(track_history[track_id]) == num_video_sequence_samples and frame_counter % skip_frame == 0:
                    start_time = time.time()
                    crops = video_classifier.preprocess_crops_for_video_cls(track_history[track_id])
                    end_time = time.time()
                    preprocess_time = end_time - start_time
                    print(f"video cls preprocess time: {preprocess_time:.4f} seconds")
                    crops_to_infer.append(crops)
                    track_ids_to_infer.append(track_id)

            if crops_to_infer and (
                not pred_labels
                or frame_counter % int(num_video_sequence_samples * skip_frame * (1 - video_cls_overlap_ratio)) == 0
            ):
                crops_batch = torch.cat(crops_to_infer, dim=0)

                start_inference_time = time.time()
                output_batch = video_classifier(crops_batch)
                end_inference_time = time.time()
                inference_time = end_inference_time - start_inference_time
                print(f"video cls inference time: {inference_time:.4f} seconds")

                pred_labels, pred_confs = video_classifier.postprocess(output_batch)

            if track_ids_to_infer and crops_to_infer:
                for box, track_id, pred_label, pred_conf in zip(boxes, track_ids_to_infer, pred_labels, pred_confs):
                    top2_preds = sorted(zip(pred_label, pred_conf), key=lambda x: x[1], reverse=True)
                    label_text = " | ".join([f"{label} ({conf:.2f})" for label, conf in top2_preds])
                    annotator.box_label(box, label_text, color=(0, 0, 255))

        # Write the annotated frame to the output video
        if output_path is not None:
            out.write(frame)

        # Display the annotated frame
        cv2.imshow("YOLOv8 Tracking with S3D Classification", frame)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cap.release()
    if output_path is not None:
        out.release()
    cv2.destroyAllWindows()
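

# Programmatic usage sketch (hypothetical file names, not part of the original script): run() can be
# called directly instead of via the CLI, e.g. with a local MP4 and a TorchVision backbone:
#   run(weights="yolo11n.pt", source="people.mp4", output_path="annotated.mp4", video_classifier_model="r3d_18")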


def parse_opt() -> argparse.Namespace:
    """Parse command line arguments for action recognition pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights", type=str, default="yolo11n.pt", help="ultralytics detector model path")
    parser.add_argument("--device", default="", help='cuda device, i.e. 0 or 0,1,2,3 or cpu/mps, "" for auto-detection')
    parser.add_argument(
        "--source",
        type=str,
        default="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
        help="video file path or youtube URL",
    )
    parser.add_argument("--output-path", type=str, default="output_video.mp4", help="output video file path")
    parser.add_argument(
        "--crop-margin-percentage", type=int, default=10, help="percentage of margin to add around detected objects"
    )
    parser.add_argument(
        "--num-video-sequence-samples", type=int, default=8, help="number of video frames to use for classification"
    )
    parser.add_argument("--skip-frame", type=int, default=2, help="number of frames to skip between detections")
    parser.add_argument(
        "--video-cls-overlap-ratio", type=float, default=0.25, help="overlap ratio between video sequences"
    )
    parser.add_argument("--fp16", action="store_true", help="use FP16 for inference")
    parser.add_argument(
        "--video-classifier-model", type=str, default="microsoft/xclip-base-patch32", help="video classifier model name"
    )
    parser.add_argument(
        "--labels",
        nargs="+",
        type=str,
        default=["dancing", "singing a song"],
        help="labels for zero-shot video classification",
    )
    return parser.parse_args()


def main(opt: argparse.Namespace) -> None:
    """Run the action recognition pipeline with parsed command line arguments."""
    run(**vars(opt))


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
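
# Example invocations (a sketch; "people.mp4" is a placeholder path, not part of the original script):
#   python action_recognition.py --source people.mp4 --output-path annotated.mp4
#   python action_recognition.py --video-classifier-model s3d --skip-frame 1
#   python action_recognition.py --labels "dancing" "singing a song" --fp16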