Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

#18534 Create .dockerignore

Merged
Glenn Jocher merged 1 commit into Ultralytics:main from ultralytics:dockerignore
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
  1. # Ultralytics YOLO 🚀, AGPL-3.0 license
  2. from ultralytics.solutions.solutions import BaseSolution
  3. from ultralytics.utils.plotting import Annotator
  4. class AIGym(BaseSolution):
  5. """
  6. A class to manage gym steps of people in a real-time video stream based on their poses.
  7. This class extends BaseSolution to monitor workouts using YOLO pose estimation models. It tracks and counts
  8. repetitions of exercises based on predefined angle thresholds for up and down positions.
  9. Attributes:
  10. count (List[int]): Repetition counts for each detected person.
  11. angle (List[float]): Current angle of the tracked body part for each person.
  12. stage (List[str]): Current exercise stage ('up', 'down', or '-') for each person.
  13. initial_stage (str | None): Initial stage of the exercise.
  14. up_angle (float): Angle threshold for considering the 'up' position of an exercise.
  15. down_angle (float): Angle threshold for considering the 'down' position of an exercise.
  16. kpts (List[int]): Indices of keypoints used for angle calculation.
  17. annotator (Annotator): Object for drawing annotations on the image.
  18. Methods:
  19. monitor: Processes a frame to detect poses, calculate angles, and count repetitions.
  20. Examples:
  21. >>> gym = AIGym(model="yolov8n-pose.pt")
  22. >>> image = cv2.imread("gym_scene.jpg")
  23. >>> processed_image = gym.monitor(image)
  24. >>> cv2.imshow("Processed Image", processed_image)
  25. >>> cv2.waitKey(0)
  26. """
  27. def __init__(self, **kwargs):
  28. """Initializes AIGym for workout monitoring using pose estimation and predefined angles."""
  29. # Check if the model name ends with '-pose'
  30. if "model" in kwargs and "-pose" not in kwargs["model"]:
  31. kwargs["model"] = "yolo11n-pose.pt"
  32. elif "model" not in kwargs:
  33. kwargs["model"] = "yolo11n-pose.pt"
  34. super().__init__(**kwargs)
  35. self.count = [] # List for counts, necessary where there are multiple objects in frame
  36. self.angle = [] # List for angle, necessary where there are multiple objects in frame
  37. self.stage = [] # List for stage, necessary where there are multiple objects in frame
  38. # Extract details from CFG single time for usage later
  39. self.initial_stage = None
  40. self.up_angle = float(self.CFG["up_angle"]) # Pose up predefined angle to consider up pose
  41. self.down_angle = float(self.CFG["down_angle"]) # Pose down predefined angle to consider down pose
  42. self.kpts = self.CFG["kpts"] # User selected kpts of workouts storage for further usage
  43. def monitor(self, im0):
  44. """
  45. Monitors workouts using Ultralytics YOLO Pose Model.
  46. This function processes an input image to track and analyze human poses for workout monitoring. It uses
  47. the YOLO Pose model to detect keypoints, estimate angles, and count repetitions based on predefined
  48. angle thresholds.
  49. Args:
  50. im0 (ndarray): Input image for processing.
  51. Returns:
  52. (ndarray): Processed image with annotations for workout monitoring.
  53. Examples:
  54. >>> gym = AIGym()
  55. >>> image = cv2.imread("workout.jpg")
  56. >>> processed_image = gym.monitor(image)
  57. """
  58. # Extract tracks
  59. tracks = self.model.track(source=im0, persist=True, classes=self.CFG["classes"], **self.track_add_args)[0]
  60. if tracks.boxes.id is not None:
  61. # Extract and check keypoints
  62. if len(tracks) > len(self.count):
  63. new_human = len(tracks) - len(self.count)
  64. self.angle += [0] * new_human
  65. self.count += [0] * new_human
  66. self.stage += ["-"] * new_human
  67. # Initialize annotator
  68. self.annotator = Annotator(im0, line_width=self.line_width)
  69. # Enumerate over keypoints
  70. for ind, k in enumerate(reversed(tracks.keypoints.data)):
  71. # Get keypoints and estimate the angle
  72. kpts = [k[int(self.kpts[i])].cpu() for i in range(3)]
  73. self.angle[ind] = self.annotator.estimate_pose_angle(*kpts)
  74. im0 = self.annotator.draw_specific_points(k, self.kpts, radius=self.line_width * 3)
  75. # Determine stage and count logic based on angle thresholds
  76. if self.angle[ind] < self.down_angle:
  77. if self.stage[ind] == "up":
  78. self.count[ind] += 1
  79. self.stage[ind] = "down"
  80. elif self.angle[ind] > self.up_angle:
  81. self.stage[ind] = "up"
  82. # Display angle, count, and stage text
  83. self.annotator.plot_angle_and_count_and_stage(
  84. angle_text=self.angle[ind], # angle text for display
  85. count_text=self.count[ind], # count text for workouts
  86. stage_text=self.stage[ind], # stage position text
  87. center_kpt=k[int(self.kpts[1])], # center keypoint for display
  88. )
  89. self.display_output(im0) # Display output image, if environment support display
  90. return im0 # return an image for writing or further usage
Discard
Tip!

Press p to see the previous file, or n to see the next file