Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

yolo.py 13 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
  1. """YOLOv5-specific modules
  2. Usage:
  3. $ python path/to/models/yolo.py --cfg yolov5s.yaml
  4. """
  5. import argparse
  6. import logging
  7. import sys
  8. from copy import deepcopy
  9. from pathlib import Path
  10. FILE = Path(__file__).absolute()
  11. sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path
  12. from models.common import *
  13. from models.experimental import *
  14. from utils.autoanchor import check_anchor_order
  15. from utils.general import make_divisible, check_file, set_logging
  16. from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
  17. select_device, copy_attr
  18. try:
  19. import thop # for FLOPs computation
  20. except ImportError:
  21. thop = None
  22. logger = logging.getLogger(__name__)
class Detect(nn.Module):
    """YOLOv5 detection head.

    Applies one 1x1 output convolution per incoming feature map and, in
    eval mode, decodes the raw network outputs into boxes in input-image
    pixel coordinates (xy, wh) plus objectness and class scores.
    """
    stride = None  # strides computed during build (assigned externally after construction)
    onnx_dynamic = False  # ONNX export parameter: when True, rebuild the grid every call for dynamic shapes

    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
        # nc: number of classes; anchors: per-layer flat (w,h) pairs;
        # ch: input channels of each feature map; inplace: allow in-place slice assignment in forward()
        super(Detect, self).__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor (box xywh + objectness + nc class scores)
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors (each anchor is a w,h pair)
        self.grid = [torch.zeros(1)] * self.nl  # init grid; lazily rebuilt in forward() when feature-map size changes
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer('anchors', a)  # shape(nl,na,2) -- buffers move with .to()/.cuda() and are saved in state_dict
        self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2), broadcastable over (bs,na,ny,nx,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv, one per detection layer
        self.inplace = inplace  # use in-place ops (e.g. slice assignment); False path needed for AWS Inferentia

    def forward(self, x):
        """Run the head on a list of feature maps.

        Returns the raw per-layer tensors in training mode, or a tuple
        (decoded predictions cat'd over layers, raw tensors) in eval mode.
        """
        # x = x.copy()  # for profiling
        z = []  # inference output
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                # Rebuild the cached grid if the feature-map size changed (or always, for dynamic ONNX export)
                if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic:
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)

                y = x[i].sigmoid()
                if self.inplace:
                    # Decode in place: sigmoid outputs -> grid offsets -> image pixels
                    y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
                    y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                else:  # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
                    xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
                    wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2)  # wh
                    y = torch.cat((xy, wh, y[..., 4:]), -1)
                z.append(y.view(bs, -1, self.no))

        return x if self.training else (torch.cat(z, 1), x)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        # Build a (1,1,ny,nx,2) grid of (x,y) cell indices.
        # NOTE(review): relies on torch.meshgrid's default 'ij' indexing; newer
        # PyTorch emits a deprecation warning without an explicit indexing= arg -- confirm target torch version.
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
    """YOLOv5 model built from a model-config dict or .yaml file."""

    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
        # cfg: model dict or path to a model .yaml; ch: input channels;
        # nc/anchors: optional overrides of the yaml values
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.safe_load(f)  # model dict

        # Define model
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        if anchors:
            logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
            # NOTE(review): round() implies `anchors` is expected to be a number
            # (anchors per layer), not a list of anchor pairs -- confirm intended usage
            self.yaml['anchors'] = round(anchors)  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
        self.inplace = self.yaml.get('inplace', True)
        # logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

        # Build strides, anchors
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 256  # 2x min stride
            m.inplace = self.inplace
            # Probe strides by running a dummy forward pass and comparing output sizes to the input size
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)  # rescale anchors from pixels to grid units
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once
            # logger.info('Strides: %s' % m.stride.tolist())

        # Init weights, biases
        initialize_weights(self)
        self.info()
        logger.info('')

    def forward(self, x, augment=False, profile=False):
        """Dispatch to augmented (multi-scale/flip) or single-scale inference."""
        if augment:
            return self.forward_augment(x)  # augmented inference, None
        else:
            return self.forward_once(x, profile)  # single-scale inference, train

    def forward_augment(self, x):
        """Test-time augmentation: run at 3 scales/flips and merge the de-scaled predictions."""
        img_size = x.shape[-2:]  # height, width
        s = [1, 0.83, 0.67]  # scales
        f = [None, 3, None]  # flips (2-ud, 3-lr)
        y = []  # outputs
        for si, fi in zip(s, f):
            xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
            yi = self.forward_once(xi)[0]  # forward
            # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
            yi = self._descale_pred(yi, fi, si, img_size)  # undo the scale/flip on the predictions
            y.append(yi)
        return torch.cat(y, 1), None  # augmented inference, train

    def forward_once(self, x, profile=False):
        """Run the model layer by layer, feeding saved outputs to layers with non-sequential inputs."""
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            if profile:
                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)  # avg ms per run over 10 runs
                if m == self.model[0]:
                    logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}")
                logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')

            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output only if some later layer needs it

        if profile:
            logger.info('%.1fms total' % sum(dt))
        return x

    def _descale_pred(self, p, flips, scale, img_size):
        # de-scale predictions following augmented inference (inverse operation)
        if self.inplace:
            p[..., :4] /= scale  # de-scale
            if flips == 2:
                p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud
            elif flips == 3:
                p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr
        else:
            x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale
            if flips == 2:
                y = img_size[0] - y  # de-flip ud
            elif flips == 3:
                x = img_size[1] - x  # de-flip lr
            p = torch.cat((x, y, wh, p[..., 4:]), -1)
        return p

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        # Log per-layer mean bias values of the Detect() output convs (debug helper).
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            logger.info(
                ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    # def _print_weights(self):
    #     for m in self.model.modules():
    #         if type(m) is Bottleneck:
    #             logger.info('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        """Fold each BatchNorm into its preceding conv for faster inference; returns self."""
        logger.info('Fusing layers... ')
        for m in self.model.modules():
            if type(m) is Conv and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, 'bn')  # remove batchnorm
                m.forward = m.fuseforward  # update forward
        self.info()
        return self

    def nms(self, mode=True):  # add or remove NMS module
        """Append (mode=True) or strip (mode=False) a trailing NMS layer; returns self."""
        present = type(self.model[-1]) is NMS  # last layer is NMS
        if mode and not present:
            logger.info('Adding NMS... ')
            m = NMS()  # module
            m.f = -1  # from
            m.i = self.model[-1].i + 1  # index
            self.model.add_module(name='%s' % m.i, module=m)  # add
            self.eval()
        elif not mode and present:
            logger.info('Removing NMS... ')
            self.model = self.model[:-1]  # remove
        return self

    def autoshape(self):  # add AutoShape module
        """Wrap the model in AutoShape for robust input handling; returns the wrapper."""
        logger.info('Adding AutoShape... ')
        m = AutoShape(self)  # wrap model
        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
        return m

    def info(self, verbose=False, img_size=640):  # print model information
        model_info(self, verbose, img_size)
  199. def parse_model(d, ch): # model_dict, input_channels(3)
  200. logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
  201. anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
  202. na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
  203. no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
  204. layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
  205. for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
  206. m = eval(m) if isinstance(m, str) else m # eval strings
  207. for j, a in enumerate(args):
  208. try:
  209. args[j] = eval(a) if isinstance(a, str) else a # eval strings
  210. except:
  211. pass
  212. n = max(round(n * gd), 1) if n > 1 else n # depth gain
  213. if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
  214. C3, C3TR]:
  215. c1, c2 = ch[f], args[0]
  216. if c2 != no: # if not output
  217. c2 = make_divisible(c2 * gw, 8)
  218. args = [c1, c2, *args[1:]]
  219. if m in [BottleneckCSP, C3, C3TR]:
  220. args.insert(2, n) # number of repeats
  221. n = 1
  222. elif m is nn.BatchNorm2d:
  223. args = [ch[f]]
  224. elif m is Concat:
  225. c2 = sum([ch[x] for x in f])
  226. elif m is Detect:
  227. args.append([ch[x] for x in f])
  228. if isinstance(args[1], int): # number of anchors
  229. args[1] = [list(range(args[1] * 2))] * len(f)
  230. elif m is Contract:
  231. c2 = ch[f] * args[0] ** 2
  232. elif m is Expand:
  233. c2 = ch[f] // args[0] ** 2
  234. else:
  235. c2 = ch[f]
  236. m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
  237. t = str(m)[8:-2].replace('__main__.', '') # module type
  238. np = sum([x.numel() for x in m_.parameters()]) # number params
  239. m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
  240. logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
  241. save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
  242. layers.append(m_)
  243. if i == 0:
  244. ch = []
  245. ch.append(c2)
  246. return nn.Sequential(*layers), sorted(save)
  247. if __name__ == '__main__':
  248. parser = argparse.ArgumentParser()
  249. parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
  250. parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
  251. opt = parser.parse_args()
  252. opt.cfg = check_file(opt.cfg) # check file
  253. set_logging()
  254. device = select_device(opt.device)
  255. # Create model
  256. model = Model(opt.cfg).to(device)
  257. model.train()
  258. # Profile
  259. # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device)
  260. # y = model(img, profile=True)
  261. # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898)
  262. # from torch.utils.tensorboard import SummaryWriter
  263. # tb_writer = SummaryWriter('.')
  264. # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/")
  265. # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph
  266. # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...