models.py
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn


class VideoNet(nn.Module):
    def __init__(self):
        super(VideoNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=1)
        self.conv2 = nn.Conv2d(in_channels=3, out_channels=1, kernel_size=3)
        self.lstm = nn.LSTM(input_size=1, hidden_size=40, num_layers=1, batch_first=True)
        self.fc = nn.Linear(40, 1)

    def forward(self, x):
        '''
        x: torch.Tensor
            (batch_size, time_steps, height, width)
            = (batch_size, 40, 10, 10)
        '''
        # extract features from each time step separately:
        # fold time_steps into the batch dimension
        batch_size = x.shape[0]
        T = x.shape[1]
        x = x.reshape(batch_size * T, 1, x.shape[2], x.shape[3])
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        # global max over both spatial dimensions
        x = torch.max(x, dim=3).values
        x = torch.max(x, dim=2).values

        # extract time_steps back out and
        # run the LSTM on the resulting 1D time series
        x = x.reshape(batch_size, T, 1)
        outputs, (h1, c1) = self.lstm(x)  # get hidden vec
        h1 = h1.squeeze(0)  # remove dimension corresponding to multiple layers / directions
        return self.fc(h1)
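
# Usage sketch (illustrative, not part of the original file): VideoNet expects
# clips shaped (batch_size, 40, 10, 10), per the docstring above; the batch
# size here is arbitrary.
#
#     net = VideoNet()
#     clips = torch.randn(8, 40, 10, 10)  # batch of 8 fake 40-frame clips
#     preds = net(clips)                  # -> torch.Size([8, 1])
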
class FCNN(nn.Module):
    """
    customized (one hidden layer) fully connected neural network class
    """

    def __init__(self, D_in, H, p):
        """
        Parameters:
        ==========================================================
        D_in: int
            dimension of input track
        H: int
            hidden layer size
        p: int
            number of additional covariates (such as lifetime, msd, etc.)
            to be concatenated to the hidden layer
        """
        super(FCNN, self).__init__()
        self.fc1 = nn.Linear(D_in, H)
        self.bn1 = nn.BatchNorm1d(H)
        self.fc2 = nn.Linear(H + p, 1)

    def forward(self, x1, x2):
        z1 = self.fc1(x1)
        z1 = self.bn1(z1)
        h1 = F.relu(z1)
        if x2 is not None:
            h1 = torch.cat((h1, x2), 1)
        return self.fc2(h1)
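
# Usage sketch (illustrative, not part of the original file): tracks of
# length D_in plus p extra covariates; the D_in/H/p values are arbitrary.
#
#     net = FCNN(D_in=40, H=64, p=2)
#     x1 = torch.randn(8, 40)             # 8 tracks of length 40
#     x2 = torch.randn(8, 2)              # 2 covariates per track (e.g. lifetime, msd)
#     preds = net(x1, x2)                 # -> torch.Size([8, 1])
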
class LSTMNet(nn.Module):
    def __init__(self, D_in, H, p):
        """
        Parameters:
        ==========================================================
        D_in: int
            dimension of input track (ignored, can be variable)
        H: int
            hidden layer size
        p: int
            number of additional covariates (such as lifetime, msd, etc.)
            to be concatenated to the hidden layer
        """
        super(LSTMNet, self).__init__()
        self.lstm = nn.LSTM(input_size=1, hidden_size=H, num_layers=1, batch_first=True)
        self.fc = nn.Linear(H + p, 1)

    def forward(self, x1, x2=None):
        x1 = x1.unsqueeze(2)  # add input_size dimension (usually reserved for an embedding vector)
        outputs, (h1, c1) = self.lstm(x1)  # get hidden vec
        h1 = h1.squeeze(0)  # remove dimension corresponding to multiple layers / directions
        if x2 is not None:
            h1 = torch.cat((h1, x2), 1)
        return self.fc(h1)
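
# Usage sketch (illustrative, not part of the original file): because the
# LSTM consumes one scalar per step, tracks of any length work; sizes are
# arbitrary.
#
#     net = LSTMNet(D_in=40, H=32, p=2)
#     x1 = torch.randn(8, 40)
#     x2 = torch.randn(8, 2)
#     preds = net(x1, x2)                 # -> torch.Size([8, 1])
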
class CNN(nn.Module):
    def __init__(self, D_in, H, p):
        """
        Parameters:
        ==========================================================
        D_in: int
            dimension of input track (ignored, can be variable)
        H: int
            hidden layer size
        p: int
            number of additional covariates (such as lifetime, msd, etc.)
            to be concatenated to the hidden layer
        """
        super(CNN, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=H, kernel_size=7)
        self.maxpool1 = nn.MaxPool1d(kernel_size=2)
        self.conv2 = nn.Conv1d(in_channels=H, out_channels=3, kernel_size=5)
        self.maxpool2 = nn.MaxPool1d(kernel_size=2)
        self.fc = nn.Linear(18 + p, 1)  # this is hard-coded

    def forward(self, x1, x2):
        x1 = x1.unsqueeze(1)  # add channel dim
        x1 = self.conv1(x1)
        x1 = self.maxpool1(x1)
        x1 = self.conv2(x1)
        x1 = self.maxpool2(x1)
        x1 = x1.reshape(x1.shape[0], -1)  # flatten channel dim
        if x2 is not None:
            x1 = torch.cat((x1, x2), 1)
        return self.fc(x1)
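
# Usage sketch (illustrative, not part of the original file): the hard-coded
# nn.Linear(18 + p, 1) requires the conv/pool stack to flatten to 18 features
# (3 channels x 6 timepoints), which appears to assume tracks of length ~40.
#
#     net = CNN(D_in=40, H=16, p=2)
#     x1 = torch.randn(8, 40)
#     x2 = torch.randn(8, 2)
#     preds = net(x1, x2)                 # -> torch.Size([8, 1])
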
class AttentionNet(nn.Module):
    """
    customized self-attention network class
    """

    def __init__(self, D_in, H, p):
        """
        Parameters:
        ==========================================================
        D_in: int
            dimension of input track (ignored, can be variable)
        H: int
            hidden layer size
        p: int
            number of additional covariates (such as lifetime, msd, etc.)
            to be concatenated to the hidden layer
        """
        super(AttentionNet, self).__init__()
        # note: the dimensions here look inconsistent as written (embed_dim=18
        # vs. LayerNorm/Linear over D_in, and fc1 reduces the feature dim to 1
        # before att2); the class appears unfinished
        self.att1 = nn.MultiheadAttention(embed_dim=18, num_heads=3)
        self.ln1 = nn.LayerNorm(D_in)
        self.fc1 = nn.Linear(D_in, 1)
        self.relu1 = nn.ReLU()
        self.att2 = nn.MultiheadAttention(embed_dim=18, num_heads=3)
        self.ln2 = nn.LayerNorm(D_in)
        self.fc2 = nn.Linear(D_in + p, 1)

    def forward(self, x1, x2):
        # self-attention: query, key and value are all x1, and
        # nn.MultiheadAttention returns (output, attention weights)
        x1, _ = self.att1(x1, x1, x1)
        x1 = self.ln1(x1)
        x1 = self.fc1(x1)
        x1 = self.relu1(x1)
        x1, _ = self.att2(x1, x1, x1)
        x1 = self.ln2(x1)
        if x2 is not None:
            x1 = torch.cat((x1, x2), 1)
        return self.fc2(x1)
class MaxLinear(nn.Module):
    '''Takes flattened input and predicts it using many linear units
    X: batch_size x num_timepoints
    '''

    def __init__(self, input_dim=24300, num_units=20, nonlin=F.relu, use_bias=False):
        super(MaxLinear, self).__init__()
        self.fc1 = nn.Linear(input_dim, num_units, bias=use_bias)

    def forward(self, X, **kwargs):
        X = self.fc1(X)
        X = torch.max(X, dim=1)[0]  # [0] because max returns (values, indices)
        return X
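
# Usage sketch (illustrative, not part of the original file): input_dim
# defaults to 24300 flattened timepoints; the batch size is arbitrary.
#
#     net = MaxLinear()
#     X = torch.randn(4, 24300)
#     preds = net(X)                      # -> torch.Size([4]), max over the 20 units
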
class MaxConv(nn.Module):
    '''Takes flattened input and predicts it using many conv units
    X: batch_size x 1 x num_timepoints
    OR
    X: list of size (num_timepoints,)
    '''

    def __init__(self, num_units=20, kernel_size=30, nonlin=F.relu, use_bias=False):
        super(MaxConv, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=num_units, kernel_size=kernel_size, bias=use_bias)
        self.offset = nn.Parameter(torch.Tensor([0]))

    def forward(self, X, **kwargs):
        if isinstance(X, list):
            # single track passed as a plain list: convert to a (1, 1, T) tensor
            X = torch.tensor(np.array(X).astype(np.float32))
            X = X.unsqueeze(0)
            X = X.unsqueeze(0)
        else:
            X = X.unsqueeze(1)  # add channel dim
        X = self.conv1(X)
        # max over channels
        X = torch.max(X, dim=1)[0]  # [0] because max returns (values, indices)
        # max over time steps
        X = torch.max(X, dim=1)[0] + self.offset
        X = X.unsqueeze(1)
        return X
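
# Usage sketch (illustrative, not part of the original file): accepts either
# a batch tensor or a single track as a plain list, per the docstring above.
#
#     net = MaxConv()
#     preds = net(torch.randn(4, 300))    # -> torch.Size([4, 1])
#     preds = net(list(range(300)))       # single track -> torch.Size([1, 1])
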
class MaxConvLinear(nn.Module):
    '''Takes an input patch, uses a linear filter to convert it to a time series,
    then runs a temporal conv and takes the max
    X: batch_size x H_patch x W_patch x time
    '''

    def __init__(self, num_timepoints=300, num_linear_filts=1, num_conv_filts=3, patch_size=9,
                 kernel_size=30, nonlin=F.relu, use_bias=False):
        super(MaxConvLinear, self).__init__()
        self.fc1 = nn.Linear(patch_size * patch_size, num_linear_filts, bias=use_bias)
        self.conv1 = nn.Conv1d(in_channels=num_linear_filts, out_channels=num_conv_filts, kernel_size=kernel_size,
                               bias=use_bias)
        self.offset = nn.Parameter(torch.Tensor([0]))

    def forward(self, X, **kwargs):
        s = X.shape  # batch_size x H_patch x W_patch x time
        X = X.reshape(s[0], s[1] * s[2], s[3])
        X = torch.transpose(X, 1, 2)
        X = self.fc1(X)  # linear filter over the flattened patch at each timepoint
        X = torch.transpose(X, 1, 2)
        X = self.conv1(X)
        # max over channels
        X = torch.max(X, dim=1)[0]  # [0] because max returns (values, indices)
        # max over time steps
        X = torch.max(X, dim=1)[0]  # + self.offset
        X = X.unsqueeze(1)
        return X
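
# Usage sketch (illustrative, not part of the original file): patches are
# patch_size x patch_size over time, per the docstring above.
#
#     net = MaxConvLinear(num_timepoints=300, patch_size=9)
#     X = torch.randn(4, 9, 9, 300)       # batch_size x H_patch x W_patch x time
#     preds = net(X)                      # -> torch.Size([4, 1])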