#609 Ci fix

Merged
Ghost merged 1 commit into Deci-AI:master from deci-ai:bugfix/infra-000_ci
from typing import Type, Union, Mapping, Any

import numpy as np
import torch
from torch import nn


class RepVGGBlock(nn.Module):
    """
    RepVGG block, consisting of three branches:

    3x3: a branch of a 3x3 Convolution + BatchNorm + Activation
    1x1: a branch of a 1x1 Convolution + BatchNorm + Activation
    no_conv_branch: a branch with only BatchNorm, used only when
    input channels == output channels and use_residual_connection is True
    (usually in all but the first block of each stage)
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        stride: int = 1,
        dilation: int = 1,
        groups: int = 1,
        activation_type: Type[nn.Module] = nn.ReLU,
        activation_kwargs: Union[Mapping[str, Any], None] = None,
        se_type: Type[nn.Module] = nn.Identity,
        se_kwargs: Union[Mapping[str, Any], None] = None,
        build_residual_branches: bool = True,
        use_residual_connection: bool = True,
        use_alpha: bool = False,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param stride: Output stride
        :param dilation: Dilation factor for the 3x3 conv
        :param groups: Number of groups used in the convolutions
        :param activation_type: Type of the nonlinearity
        :param activation_kwargs: Additional arguments for instantiating the activation module
        :param se_type: Type of the SE block (use nn.Identity to disable SE)
        :param se_kwargs: Additional arguments for instantiating the SE module
        :param build_residual_branches: Whether to build the separate training-time branches; if False, the
            block is initialized with already-fused parameters (for deployment)
        :param use_residual_connection: Whether to add the input x to the output (enabled in RepVGG, disabled in PP-YOLO)
        :param use_alpha: If True, adds a learnable weighting parameter for the 1x1 branch (PP-YOLO-E Plus)
        """
        super().__init__()

        if activation_kwargs is None:
            activation_kwargs = {}
        if se_kwargs is None:
            se_kwargs = {}

        self.groups = groups
        self.in_channels = in_channels

        self.nonlinearity = activation_type(**activation_kwargs)
        self.se = se_type(**se_kwargs)

        # The BatchNorm-only identity branch is valid only when input and output
        # tensors have the same shape (same channel count, stride 1).
        if use_residual_connection and out_channels == in_channels and stride == 1:
            self.no_conv_branch = nn.BatchNorm2d(num_features=in_channels)
        else:
            self.no_conv_branch = None

        self.branch_3x3 = self._conv_bn(
            in_channels=in_channels,
            out_channels=out_channels,
            dilation=dilation,
            kernel_size=3,
            stride=stride,
            padding=dilation,
            groups=groups,
        )
        self.branch_1x1 = self._conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, groups=groups)

        if use_alpha:
            self.alpha = torch.nn.Parameter(torch.tensor([1.0]), requires_grad=True)
        else:
            self.alpha = 1

        if not build_residual_branches:
            self.fuse_block_residual_branches()
        else:
            self.build_residual_branches = True

    def forward(self, inputs):
        # Deployment mode: a single fused 3x3 conv replaces all branches.
        if not self.build_residual_branches:
            return self.nonlinearity(self.se(self.rbr_reparam(inputs)))

        if self.no_conv_branch is None:
            id_out = 0
        else:
            id_out = self.no_conv_branch(inputs)

        return self.nonlinearity(self.se(self.branch_3x3(inputs) + self.alpha * self.branch_1x1(inputs) + id_out))

    def _get_equivalent_kernel_bias(self):
        """
        Fuses the 3x3, 1x1 and identity branches into a single 3x3 conv layer.
        """
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.branch_3x3)
        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.branch_1x1)
        kernelid, biasid = self._fuse_bn_tensor(self.no_conv_branch)
        return kernel3x3 + self.alpha * self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + self.alpha * bias1x1 + biasid

    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
        """
        Zero-pads the 1x1 convolution weights to 3x3 so they can be added to the 3x3 kernel.

        :param kernel1x1: weights of the 1x1 convolution
        :return: padded 1x1 weights
        """
        if kernel1x1 is None:
            return 0
        else:
            return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])

    def _fuse_bn_tensor(self, branch):
        """
        Fuses the BatchNorm of a branch into its conv layer.
        If the branch is the identity branch (no conv), the kernel is simply an identity (eye) kernel.

        :param branch: Conv+BN nn.Sequential, a bare nn.BatchNorm2d, or None
        :return: the fused (kernel, bias) pair
        """
        if branch is None:
            return 0, 0
        if isinstance(branch, nn.Sequential):
            kernel = branch.conv.weight
            running_mean = branch.bn.running_mean
            running_var = branch.bn.running_var
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn.eps
        else:
            assert isinstance(branch, nn.BatchNorm2d)
            if not hasattr(self, "id_tensor"):
                # Build a 3x3 identity kernel (a 1 at the center of each channel's own
                # filter) so that convolving with it reproduces the input.
                input_dim = self.in_channels // self.groups
                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
                for i in range(self.in_channels):
                    kernel_value[i, i % input_dim, 1, 1] = 1
                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
            kernel = self.id_tensor
            running_mean = branch.running_mean
            running_var = branch.running_var
            gamma = branch.weight
            beta = branch.bias
            eps = branch.eps
        # Standard Conv+BN folding: W' = W * gamma / std, b' = beta - running_mean * gamma / std
        std = (running_var + eps).sqrt()
        t = (gamma / std).reshape(-1, 1, 1, 1)
        return kernel * t, beta - running_mean * gamma / std

    def fuse_block_residual_branches(self):
        """
        Converts a RepVGG block from training mode (with branches) to deployment mode (a VGG-like single conv).
        """
        if hasattr(self, "build_residual_branches") and not self.build_residual_branches:
            return
        kernel, bias = self._get_equivalent_kernel_bias()
        self.rbr_reparam = nn.Conv2d(
            in_channels=self.branch_3x3.conv.in_channels,
            out_channels=self.branch_3x3.conv.out_channels,
            kernel_size=self.branch_3x3.conv.kernel_size,
            stride=self.branch_3x3.conv.stride,
            padding=self.branch_3x3.conv.padding,
            dilation=self.branch_3x3.conv.dilation,
            groups=self.branch_3x3.conv.groups,
            bias=True,
        )
        self.rbr_reparam.weight.data = kernel
        self.rbr_reparam.bias.data = bias
        for para in self.parameters():
            para.detach_()
        self.__delattr__("branch_3x3")
        self.__delattr__("branch_1x1")
        if hasattr(self, "no_conv_branch"):
            self.__delattr__("no_conv_branch")
        if hasattr(self, "alpha"):
            self.__delattr__("alpha")
        self.build_residual_branches = False

    @staticmethod
    def _conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1, dilation=1):
        result = nn.Sequential()
        result.add_module(
            "conv",
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                groups=groups,
                bias=False,
                dilation=dilation,
            ),
        )
        result.add_module("bn", nn.BatchNorm2d(num_features=out_channels))
        return result