Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

test_python.py 7.0 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
  1. # Ultralytics YOLO 🚀, AGPL-3.0 license
  2. from pathlib import Path
  3. import cv2
  4. import numpy as np
  5. import torch
  6. from PIL import Image
  7. from torchvision.transforms import ToTensor
  8. from ultralytics import RTDETR, YOLO
  9. from ultralytics.yolo.data.build import load_inference_source
  10. from ultralytics.yolo.utils import LINUX, ONLINE, ROOT, SETTINGS
# Test fixtures: pretrained weights, an untrained config, and sample images.
MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n.pt'  # pretrained detection weights
CFG = 'yolov8n.yaml'  # model config (built from scratch, untrained)
SOURCE = ROOT / 'assets/bus.jpg'  # sample image shipped with the package
SOURCE_GREYSCALE = Path(f'{SOURCE.parent / SOURCE.stem}_greyscale.jpg')  # derived 1-channel copy
SOURCE_RGBA = Path(f'{SOURCE.parent / SOURCE.stem}_4ch.png')  # derived 4-channel copy
# Convert SOURCE to greyscale and 4-ch
im = Image.open(SOURCE)
im.convert('L').save(SOURCE_GREYSCALE) # greyscale
im.convert('RGBA').save(SOURCE_RGBA) # 4-ch PNG with alpha
  20. def test_model_forward():
  21. model = YOLO(CFG)
  22. model(SOURCE)
  23. def test_model_info():
  24. model = YOLO(CFG)
  25. model.info()
  26. model = YOLO(MODEL)
  27. model.info(verbose=True)
  28. def test_model_fuse():
  29. model = YOLO(CFG)
  30. model.fuse()
  31. model = YOLO(MODEL)
  32. model.fuse()
  33. def test_predict_dir():
  34. model = YOLO(MODEL)
  35. model(source=ROOT / 'assets')
  36. def test_predict_img():
  37. model = YOLO(MODEL)
  38. seg_model = YOLO('yolov8n-seg.pt')
  39. cls_model = YOLO('yolov8n-cls.pt')
  40. pose_model = YOLO('yolov8n-pose.pt')
  41. im = cv2.imread(str(SOURCE))
  42. assert len(model(source=Image.open(SOURCE), save=True, verbose=True)) == 1 # PIL
  43. assert len(model(source=im, save=True, save_txt=True)) == 1 # ndarray
  44. assert len(model(source=[im, im], save=True, save_txt=True)) == 2 # batch
  45. assert len(list(model(source=[im, im], save=True, stream=True))) == 2 # stream
  46. assert len(model(torch.zeros(320, 640, 3).numpy())) == 1 # tensor to numpy
  47. batch = [
  48. str(SOURCE), # filename
  49. Path(SOURCE), # Path
  50. 'https://ultralytics.com/images/zidane.jpg' if ONLINE else SOURCE, # URI
  51. cv2.imread(str(SOURCE)), # OpenCV
  52. Image.open(SOURCE), # PIL
  53. np.zeros((320, 640, 3))] # numpy
  54. assert len(model(batch, visualize=True)) == len(batch) # multiple sources in a batch
  55. # Test tensor inference
  56. im = cv2.imread(str(SOURCE)) # OpenCV
  57. t = cv2.resize(im, (32, 32))
  58. t = ToTensor()(t)
  59. t = torch.stack([t, t, t, t])
  60. results = model(t, visualize=True)
  61. assert len(results) == t.shape[0]
  62. results = seg_model(t, visualize=True)
  63. assert len(results) == t.shape[0]
  64. results = cls_model(t, visualize=True)
  65. assert len(results) == t.shape[0]
  66. results = pose_model(t, visualize=True)
  67. assert len(results) == t.shape[0]
  68. def test_predict_grey_and_4ch():
  69. model = YOLO(MODEL)
  70. for f in SOURCE_RGBA, SOURCE_GREYSCALE:
  71. for source in Image.open(f), cv2.imread(str(f)), f:
  72. model(source, save=True, verbose=True)
  73. def test_val():
  74. model = YOLO(MODEL)
  75. model.val(data='coco8.yaml', imgsz=32)
  76. def test_val_scratch():
  77. model = YOLO(CFG)
  78. model.val(data='coco8.yaml', imgsz=32)
  79. def test_amp():
  80. if torch.cuda.is_available():
  81. from ultralytics.yolo.utils.checks import check_amp
  82. model = YOLO(MODEL).model.cuda()
  83. assert check_amp(model)
  84. def test_train_scratch():
  85. model = YOLO(CFG)
  86. model.train(data='coco8.yaml', epochs=1, imgsz=32, cache='disk') # test disk caching
  87. model(SOURCE)
  88. def test_train_pretrained():
  89. model = YOLO(MODEL)
  90. model.train(data='coco8.yaml', epochs=1, imgsz=32, cache='ram') # test RAM caching
  91. model(SOURCE)
  92. def test_export_torchscript():
  93. model = YOLO(MODEL)
  94. f = model.export(format='torchscript')
  95. YOLO(f)(SOURCE) # exported model inference
  96. def test_export_torchscript_scratch():
  97. model = YOLO(CFG)
  98. f = model.export(format='torchscript')
  99. YOLO(f)(SOURCE) # exported model inference
  100. def test_export_onnx():
  101. model = YOLO(MODEL)
  102. f = model.export(format='onnx')
  103. YOLO(f)(SOURCE) # exported model inference
  104. def test_export_openvino():
  105. model = YOLO(MODEL)
  106. f = model.export(format='openvino')
  107. YOLO(f)(SOURCE) # exported model inference
  108. def test_export_coreml(): # sourcery skip: move-assign
  109. model = YOLO(MODEL)
  110. model.export(format='coreml')
  111. # if MACOS:
  112. # YOLO(f)(SOURCE) # model prediction only supported on macOS
  113. def test_export_tflite(enabled=False):
  114. # TF suffers from install conflicts on Windows and macOS
  115. if enabled and LINUX:
  116. model = YOLO(MODEL)
  117. f = model.export(format='tflite')
  118. YOLO(f)(SOURCE)
  119. def test_export_pb(enabled=False):
  120. # TF suffers from install conflicts on Windows and macOS
  121. if enabled and LINUX:
  122. model = YOLO(MODEL)
  123. f = model.export(format='pb')
  124. YOLO(f)(SOURCE)
  125. def test_export_paddle(enabled=False):
  126. # Paddle protobuf requirements conflicting with onnx protobuf requirements
  127. if enabled:
  128. model = YOLO(MODEL)
  129. model.export(format='paddle')
  130. def test_all_model_yamls():
  131. for m in list((ROOT / 'models').rglob('yolo*.yaml')):
  132. if m.name == 'yolov8-rtdetr.yaml': # except the rtdetr model
  133. RTDETR(m.name)
  134. else:
  135. YOLO(m.name)
  136. def test_workflow():
  137. model = YOLO(MODEL)
  138. model.train(data='coco8.yaml', epochs=1, imgsz=32)
  139. model.val()
  140. model.predict(SOURCE)
  141. model.export(format='onnx') # export a model to ONNX format
  142. def test_predict_callback_and_setup():
  143. # test callback addition for prediction
  144. def on_predict_batch_end(predictor): # results -> List[batch_size]
  145. path, im0s, _, _ = predictor.batch
  146. # print('on_predict_batch_end', im0s[0].shape)
  147. im0s = im0s if isinstance(im0s, list) else [im0s]
  148. bs = [predictor.dataset.bs for _ in range(len(path))]
  149. predictor.results = zip(predictor.results, im0s, bs)
  150. model = YOLO(MODEL)
  151. model.add_callback('on_predict_batch_end', on_predict_batch_end)
  152. dataset = load_inference_source(source=SOURCE)
  153. bs = dataset.bs # noqa access predictor properties
  154. results = model.predict(dataset, stream=True) # source already setup
  155. for _, (result, im0, bs) in enumerate(results):
  156. print('test_callback', im0.shape)
  157. print('test_callback', bs)
  158. boxes = result.boxes # Boxes object for bbox outputs
  159. print(boxes)
  160. def _test_results_api(res):
  161. # General apis except plot
  162. res = res.cpu().numpy()
  163. # res = res.cuda()
  164. res = res.to(device='cpu', dtype=torch.float32)
  165. res.save_txt('label.txt', save_conf=False)
  166. res.save_txt('label.txt', save_conf=True)
  167. res.save_crop('crops/')
  168. res.tojson(normalize=False)
  169. res.tojson(normalize=True)
  170. res.plot(pil=True)
  171. res.plot(conf=True, boxes=False)
  172. res.plot()
  173. print(res)
  174. print(res.path)
  175. for k in res.keys:
  176. print(getattr(res, k))
  177. def test_results():
  178. for m in ['yolov8n-pose.pt', 'yolov8n-seg.pt', 'yolov8n.pt', 'yolov8n-cls.pt']:
  179. model = YOLO(m)
  180. res = model([SOURCE, SOURCE])
  181. _test_results_api(res[0])
  182. def test_track():
  183. im = cv2.imread(str(SOURCE))
  184. for m in ['yolov8n-pose.pt', 'yolov8n-seg.pt', 'yolov8n.pt']:
  185. model = YOLO(m)
  186. res = model.track(source=im)
  187. _test_results_api(res[0])
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...