#609 CI fix

Merged
Ghost merged 1 commit into Deci-AI:master from deci-ai:bugfix/infra-000_ci
  1. """
  2. CSP Darknet
  3. """
  4. import math
  5. from typing import Tuple, Type
  6. import torch
  7. import torch.nn as nn
  8. from super_gradients.modules import Residual
  9. from super_gradients.training.utils.utils import get_param, HpmStruct
  10. from super_gradients.training.models.sg_module import SgModule
  11. def autopad(kernel, padding=None):
  12. # PAD TO 'SAME'
  13. if padding is None:
  14. padding = kernel // 2 if isinstance(kernel, int) else [x // 2 for x in kernel]
  15. return padding
  16. def width_multiplier(original, factor, divisor: int = None):
  17. if divisor is None:
  18. return int(original * factor)
  19. else:
  20. return math.ceil(int(original * factor) / divisor) * divisor
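
# Illustrative values (added for clarity, not in the original source):
#   width_multiplier(64, 0.375)           -> 24
#   width_multiplier(64, 0.33, divisor=8) -> 24  (int(64 * 0.33) = 21, rounded up to a multiple of 8)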


def get_yolo_type_params(yolo_type: str, width_mult_factor: float, depth_mult_factor: float):
    if yolo_type == "yoloX":
        struct = (3, 9, 9, 3)
        block = CSPLayer
        activation_type = nn.SiLU
        width_mult = lambda channels: width_multiplier(channels, width_mult_factor)
    else:
        raise NotImplementedError(f"Yolo type: {yolo_type} is not supported")
    depth_mult = lambda blocks: max(round(blocks * depth_mult_factor), 1) if blocks > 1 else blocks
    return struct, block, activation_type, width_mult, depth_mult


class NumClassesMissingException(Exception):
    pass


class Conv(nn.Module):
    # STANDARD CONVOLUTION
    def __init__(self, input_channels, output_channels, kernel, stride, activation_type: Type[nn.Module], padding: int = None, groups: int = None):
        super().__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, kernel, stride, autopad(kernel, padding), groups=groups or 1, bias=False)
        self.bn = nn.BatchNorm2d(output_channels)
        self.act = activation_type()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        return self.act(self.conv(x))


class GroupedConvBlock(nn.Module):
    """
    Grouped Conv KxK -> usual Conv 1x1
    """

    def __init__(self, input_channels, output_channels, kernel, stride, activation_type: Type[nn.Module], padding: int = None, groups: int = None):
        """
        :param groups: num of groups in the first conv; if None, a depthwise separable conv will be used
                       (groups = input channels)
        """
        super().__init__()
        self.dconv = Conv(input_channels, input_channels, kernel, stride, activation_type, padding, groups=groups or input_channels)
        self.conv = Conv(input_channels, output_channels, 1, 1, activation_type)

    def forward(self, x):
        return self.conv(self.dconv(x))


class Bottleneck(nn.Module):
    # STANDARD BOTTLENECK
    def __init__(self, input_channels, output_channels, shortcut: bool, activation_type: Type[nn.Module], depthwise=False):
        super().__init__()
        ConvBlock = GroupedConvBlock if depthwise else Conv
        hidden_channels = output_channels
        self.cv1 = Conv(input_channels, hidden_channels, 1, 1, activation_type)
        self.cv2 = ConvBlock(hidden_channels, output_channels, 3, 1, activation_type)
        self.add = shortcut and input_channels == output_channels
        self.shortcut = Residual() if self.add else None

    def forward(self, x):
        return self.shortcut(x) + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class CSPLayer(nn.Module):
    """
    CSP Bottleneck with 3 convolutions

    Args:
        in_channels: int, input channels.
        out_channels: int, output channels.
        num_bottlenecks: int, number of bottleneck conv layers.
        act: Type[nn.Module], activation type.
        shortcut: bool, whether to apply shortcut (i.e. add input to result) in bottlenecks (default=True).
        depthwise: bool, whether to use GroupedConvBlock in the last conv in bottlenecks (default=False).
        expansion: float, determines the number of hidden channels (default=0.5).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        num_bottlenecks: int,
        act: Type[nn.Module],
        shortcut: bool = True,
        depthwise: bool = False,
        expansion: float = 0.5,
    ):
        super().__init__()
        hidden_channels = int(out_channels * expansion)
        self.conv1 = Conv(in_channels, hidden_channels, 1, stride=1, activation_type=act)
        self.conv2 = Conv(in_channels, hidden_channels, 1, stride=1, activation_type=act)
        self.conv3 = Conv(2 * hidden_channels, out_channels, 1, stride=1, activation_type=act)
        module_list = [Bottleneck(hidden_channels, hidden_channels, shortcut, act, depthwise) for _ in range(num_bottlenecks)]
        self.bottlenecks = nn.Sequential(*module_list)

    def forward(self, x):
        x_1 = self.conv1(x)
        x_1 = self.bottlenecks(x_1)
        x_2 = self.conv2(x)
        x = torch.cat((x_1, x_2), dim=1)
        return self.conv3(x)
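
# Shape sketch for CSPLayer (illustrative, added for clarity; assumes expansion=0.5):
#   conv1(x) and conv2(x): [N, C_in, H, W] -> [N, C_out // 2, H, W] each
#   bottlenecks preserve [N, C_out // 2, H, W]; the cat restores C_out channels for conv3
#   e.g. CSPLayer(128, 128, num_bottlenecks=3, act=nn.SiLU) maps [1, 128, 56, 56] -> [1, 128, 56, 56]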


class BottleneckCSP(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, input_channels, output_channels, bottleneck_blocks_num, activation_type: Type[nn.Module], shortcut=True, depthwise=False, expansion=0.5):
        super().__init__()
        hidden_channels = int(output_channels * expansion)
        self.cv1 = Conv(input_channels, hidden_channels, 1, 1, activation_type)
        self.cv2 = nn.Conv2d(input_channels, hidden_channels, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(hidden_channels, hidden_channels, 1, 1, bias=False)
        self.cv4 = Conv(2 * hidden_channels, output_channels, 1, 1, activation_type)
        self.bn = nn.BatchNorm2d(2 * hidden_channels)  # APPLIED TO CAT(CV2, CV3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        self.m = nn.Sequential(*[Bottleneck(hidden_channels, hidden_channels, shortcut, activation_type, depthwise) for _ in range(bottleneck_blocks_num)])

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))


class SPP(nn.Module):
    # SPATIAL PYRAMID POOLING LAYER
    def __init__(self, input_channels, output_channels, k: Tuple, activation_type: Type[nn.Module]):
        super().__init__()
        hidden_channels = input_channels // 2
        self.cv1 = Conv(input_channels, hidden_channels, 1, 1, activation_type)
        self.cv2 = Conv(hidden_channels * (len(k) + 1), output_channels, 1, 1, activation_type)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))


class ViewModule(nn.Module):
    """
    Returns a reshaped version of the input, to be used in non-backbone mode
    """

    def __init__(self, features=1024):
        super(ViewModule, self).__init__()
        self.features = features

    def forward(self, x):
        return x.view(-1, self.features)


class CSPDarknet53(SgModule):
    def __init__(self, arch_params: HpmStruct):
        super().__init__()
        self.num_classes = arch_params.num_classes
        self.backbone_mode = get_param(arch_params, "backbone_mode", False)
        depth_mult_factor = get_param(arch_params, "depth_mult_factor", 1.0)
        width_mult_factor = get_param(arch_params, "width_mult_factor", 1.0)
        channels_in = get_param(arch_params, "channels_in", 3)
        yolo_type = get_param(arch_params, "yolo_type", "yoloX")
        depthwise = get_param(arch_params, "depthwise", False)

        struct, block, activation_type, width_mult, depth_mult = get_yolo_type_params(yolo_type, width_mult_factor, depth_mult_factor)
        ConvBlock = Conv if not depthwise else GroupedConvBlock
        struct = [depth_mult(s) for s in struct]
        self._modules_list = nn.ModuleList()

        if get_param(arch_params, "stem_type") == "6x6" or yolo_type == "yoloX":
            self._modules_list.append(Conv(channels_in, width_mult(64), 6, 2, activation_type, padding=2))  # 0
        else:
            raise NotImplementedError(f"Yolo type: {yolo_type} is not supported")

        for i, layer_in_ch in enumerate([64, 128, 256, 512]):
            self._modules_list.append(ConvBlock(width_mult(layer_in_ch), width_mult(layer_in_ch * 2), 3, 2, activation_type))  # 1,3,5,7
            if i < 3:
                self._modules_list.append(
                    block(width_mult(layer_in_ch * 2), width_mult(layer_in_ch * 2), struct[i], activation_type, depthwise=depthwise)
                )  # 2,4,6

        if yolo_type == "yoloX":
            self._modules_list.append(SPP(width_mult(1024), width_mult(1024), (5, 9, 13), activation_type))  # 8
            self._modules_list.append(block(width_mult(1024), width_mult(1024), struct[3], activation_type, False, depthwise=depthwise))  # 9
        else:
            raise NotImplementedError(f"Yolo type: {yolo_type} is not supported")

        if not self.backbone_mode:
            # IF NOT USED AS A BACKBONE BUT AS A CLASSIFIER WE ADD THE CLASSIFICATION LAYERS
            self._modules_list.append(nn.AdaptiveAvgPool2d((1, 1)))
            self._modules_list.append(ViewModule(1024))
            self._modules_list.append(nn.Linear(1024, self.num_classes))
    def forward(self, x):
        # nn.ModuleList is not callable; run the registered modules sequentially
        for module in self._modules_list:
            x = module(x)
        return x
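
A minimal usage sketch of the classifier path, for orientation (not part of this PR's diff; the HpmStruct(num_classes=...) construction and the 224x224 input size are illustrative assumptions):

import torch
from super_gradients.training.utils.utils import HpmStruct

arch_params = HpmStruct(num_classes=1000)  # backbone_mode defaults to False, so the classification head is added
model = CSPDarknet53(arch_params).eval()

with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # stem stride 2 + four stride-2 stages -> 32x downsampling
print(logits.shape)  # expected: torch.Size([1, 1000])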