# attention_model.py

import torch
from torch import nn
from torch.utils.checkpoint import checkpoint
import math
from typing import NamedTuple
from utils.tensor_functions import compute_in_batches

from nets.graph_encoder import GraphAttentionEncoder
from torch.nn import DataParallel
from utils.beam_search import CachedLookup
from utils.functions import sample_many


def set_decode_type(model, decode_type):
    if isinstance(model, DataParallel):
        model = model.module
    model.set_decode_type(decode_type)


class AttentionModelFixed(NamedTuple):
    """
    Context for the AttentionModel decoder that is fixed during decoding, so it can be precomputed/cached.
    This class allows for efficient indexing of multiple Tensors at once.
    """
    node_embeddings: torch.Tensor
    context_node_projected: torch.Tensor
    glimpse_key: torch.Tensor
    glimpse_val: torch.Tensor
    logit_key: torch.Tensor

    def __getitem__(self, key):
        if torch.is_tensor(key) or isinstance(key, slice):
            return AttentionModelFixed(
                node_embeddings=self.node_embeddings[key],
                context_node_projected=self.context_node_projected[key],
                glimpse_key=self.glimpse_key[:, key],  # dim 0 are the heads
                glimpse_val=self.glimpse_val[:, key],  # dim 0 are the heads
                logit_key=self.logit_key[key]
            )
        return super(AttentionModelFixed, self).__getitem__(key)
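
# A minimal sketch of how the indexing above behaves (illustrative only; the shapes are
# assumptions based on how _precompute builds this tuple further below):
#
#     fixed = model._precompute(embeddings)   # embeddings: (batch_size, graph_size, embed_dim)
#     keep = torch.tensor([0, 2, 5])          # batch indices of still-unfinished instances
#     shrunk = fixed[keep]                    # every field is indexed on its batch dimension;
#                                             # glimpse_key/glimpse_val are indexed on dim 1,
#                                             # since dim 0 holds the attention heads.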
class AttentionModel(nn.Module):

    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 problem,
                 n_encode_layers=2,
                 tanh_clipping=10.,
                 mask_inner=True,
                 mask_logits=True,
                 normalization='batch',
                 n_heads=8,
                 checkpoint_encoder=False,
                 shrink_size=None):
        super(AttentionModel, self).__init__()

        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.n_encode_layers = n_encode_layers
        self.decode_type = None
        self.temp = 1.0
        self.allow_partial = problem.NAME == 'sdvrp'
        self.is_vrp = problem.NAME == 'cvrp' or problem.NAME == 'sdvrp'
        self.is_orienteering = problem.NAME == 'op'
        self.is_pctsp = problem.NAME == 'pctsp'

        self.tanh_clipping = tanh_clipping

        self.mask_inner = mask_inner
        self.mask_logits = mask_logits

        self.problem = problem
        self.n_heads = n_heads
        self.checkpoint_encoder = checkpoint_encoder
        self.shrink_size = shrink_size

        # Problem specific context parameters (placeholder and step context dimension)
        if self.is_vrp or self.is_orienteering or self.is_pctsp:
            # Embedding of last node + remaining capacity / remaining length / remaining prize to collect
            step_context_dim = embedding_dim + 1

            if self.is_pctsp:
                node_dim = 4  # x, y, expected_prize, penalty
            else:
                node_dim = 3  # x, y, demand / prize

            # Special embedding projection for depot node
            self.init_embed_depot = nn.Linear(2, embedding_dim)

            if self.is_vrp and self.allow_partial:  # Need to include the demand if split delivery allowed
                self.project_node_step = nn.Linear(1, 3 * embedding_dim, bias=False)
        else:  # TSP
            assert problem.NAME == "tsp", "Unsupported problem: {}".format(problem.NAME)
            step_context_dim = 2 * embedding_dim  # Embedding of first and last node
            node_dim = 2  # x, y

            # Learned input symbols for first action
            self.W_placeholder = nn.Parameter(torch.Tensor(2 * embedding_dim))
            self.W_placeholder.data.uniform_(-1, 1)  # Placeholder should be in range of activations

        self.init_embed = nn.Linear(node_dim, embedding_dim)

        self.embedder = GraphAttentionEncoder(
            n_heads=n_heads,
            embed_dim=embedding_dim,
            n_layers=self.n_encode_layers,
            normalization=normalization
        )

        # For each node we compute (glimpse key, glimpse value, logit key), so 3 * embedding_dim
        self.project_node_embeddings = nn.Linear(embedding_dim, 3 * embedding_dim, bias=False)
        self.project_fixed_context = nn.Linear(embedding_dim, embedding_dim, bias=False)
        self.project_step_context = nn.Linear(step_context_dim, embedding_dim, bias=False)
        assert embedding_dim % n_heads == 0
        # Note n_heads * val_dim == embedding_dim, so the input to project_out is embedding_dim
        self.project_out = nn.Linear(embedding_dim, embedding_dim, bias=False)

    def set_decode_type(self, decode_type, temp=None):
        self.decode_type = decode_type
        if temp is not None:  # Do not change temperature if not provided
            self.temp = temp

    def forward(self, input, return_pi=False):
        """
        :param input: (batch_size, graph_size, node_dim) input node features or dictionary with multiple tensors
        :param return_pi: whether to return the output sequences; this is optional as it is not compatible with
        using DataParallel, since the results may be of different lengths on different GPUs
        :return:
        """

        if self.checkpoint_encoder and self.training:  # Only checkpoint if we need gradients
            embeddings, _ = checkpoint(self.embedder, self._init_embed(input))
        else:
            embeddings, _ = self.embedder(self._init_embed(input))

        _log_p, pi = self._inner(input, embeddings)

        cost, mask = self.problem.get_costs(input, pi)
        # Log likelihood is calculated within the model since returning it per action does not work well with
        # DataParallel, since sequences can be of different lengths
        ll = self._calc_log_likelihood(_log_p, pi, mask)
        if return_pi:
            return cost, ll, pi

        return cost, ll

    def beam_search(self, *args, **kwargs):
        return self.problem.beam_search(*args, **kwargs, model=self)

    def precompute_fixed(self, input):
        embeddings, _ = self.embedder(self._init_embed(input))
        # Use a CachedLookup such that if we repeatedly index this object with the same index we only need to do
        # the lookup once... this is the case if all elements in the batch have maximum batch size
        return CachedLookup(self._precompute(embeddings))

    def propose_expansions(self, beam, fixed, expand_size=None, normalize=False, max_calc_batch_size=4096):
        # First dim = batch_size * cur_beam_size
        log_p_topk, ind_topk = compute_in_batches(
            lambda b: self._get_log_p_topk(fixed[b.ids], b.state, k=expand_size, normalize=normalize),
            max_calc_batch_size, beam, n=beam.size()
        )

        assert log_p_topk.size(1) == 1, "Can only have single step"
        # This will broadcast, calculate log_p (score) of expansions
        score_expand = beam.score[:, None] + log_p_topk[:, 0, :]

        # We flatten the action as we need to filter and this cannot be done in 2d
        flat_action = ind_topk.view(-1)
        flat_score = score_expand.view(-1)
        flat_feas = flat_score > -1e10  # != -math.inf triggers

        # Parent is the row idx of ind_topk, found by enumerating elements and integer-dividing by the number of columns
        flat_parent = torch.arange(flat_action.size(-1), out=flat_action.new()) // ind_topk.size(-1)

        # Filter infeasible
        feas_ind_2d = torch.nonzero(flat_feas)

        if len(feas_ind_2d) == 0:
            # Too bad, no feasible expansions at all :(
            return None, None, None

        feas_ind = feas_ind_2d[:, 0]

        return flat_parent[feas_ind], flat_action[feas_ind], flat_score[feas_ind]

    def _calc_log_likelihood(self, _log_p, a, mask):

        # Get log_p corresponding to selected actions
        log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)

        # Optional: mask out actions irrelevant to the objective so they do not get reinforced
        if mask is not None:
            log_p[mask] = 0

        assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"

        # Calculate log_likelihood
        return log_p.sum(1)
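
    # In equation form, the value returned above is the sequence log-likelihood
    #     log p(pi | s) = sum_t log p(pi_t | s, pi_1, ..., pi_{t-1}),
    # where masked (irrelevant) steps contribute 0 to the sum.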
    def _init_embed(self, input):

        if self.is_vrp or self.is_orienteering or self.is_pctsp:
            if self.is_vrp:
                features = ('demand', )
            elif self.is_orienteering:
                features = ('prize', )
            else:
                assert self.is_pctsp
                features = ('deterministic_prize', 'penalty')
            return torch.cat(
                (
                    self.init_embed_depot(input['depot'])[:, None, :],
                    self.init_embed(torch.cat((
                        input['loc'],
                        *(input[feat][:, :, None] for feat in features)
                    ), -1))
                ),
                1
            )
        # TSP
        return self.init_embed(input)
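
    # Expected input layout (a sketch inferred from the keys used above; the actual datasets are
    # defined elsewhere in the repository):
    #     TSP:    tensor of node coordinates, (batch_size, graph_size, 2)
    #     VRP:    {'loc': (batch, graph, 2), 'demand': (batch, graph), 'depot': (batch, 2)}
    #     OP:     {'loc': (batch, graph, 2), 'prize': (batch, graph), 'depot': (batch, 2)}
    #     PCTSP:  {'loc': (batch, graph, 2), 'deterministic_prize': (batch, graph),
    #              'penalty': (batch, graph), 'depot': (batch, 2)}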
    def _inner(self, input, embeddings):

        outputs = []
        sequences = []

        state = self.problem.make_state(input)

        # Compute keys, values for the glimpse and keys for the logits once as they can be reused in every step
        fixed = self._precompute(embeddings)

        batch_size = state.ids.size(0)

        # Perform decoding steps
        i = 0
        while not (self.shrink_size is None and state.all_finished()):

            if self.shrink_size is not None:
                unfinished = torch.nonzero(state.get_finished() == 0)
                if len(unfinished) == 0:
                    break
                unfinished = unfinished[:, 0]
                # Check if we can shrink by at least shrink_size and if this leaves at least 16
                # (otherwise batch norm will not work well and it is inefficient anyway)
                if 16 <= len(unfinished) <= state.ids.size(0) - self.shrink_size:
                    # Filter states
                    state = state[unfinished]
                    fixed = fixed[unfinished]

            log_p, mask = self._get_log_p(fixed, state)

            # Select the indices of the next nodes in the sequences, result (batch_size) long
            selected = self._select_node(log_p.exp()[:, 0, :], mask[:, 0, :])  # Squeeze out steps dimension

            state = state.update(selected)

            # Now make log_p, selected desired output size by 'unshrinking'
            if self.shrink_size is not None and state.ids.size(0) < batch_size:
                log_p_, selected_ = log_p, selected
                log_p = log_p_.new_zeros(batch_size, *log_p_.size()[1:])
                selected = selected_.new_zeros(batch_size)

                log_p[state.ids[:, 0]] = log_p_
                selected[state.ids[:, 0]] = selected_

            # Collect output of step
            outputs.append(log_p[:, 0, :])
            sequences.append(selected)

            i += 1

        # Collected lists, return Tensor
        return torch.stack(outputs, 1), torch.stack(sequences, 1)
    def sample_many(self, input, batch_rep=1, iter_rep=1):
        """
        :param input: (batch_size, graph_size, node_dim) input node features
        :return:
        """
        # Bit ugly but we need to pass the embeddings as well.
        # Making a tuple will not work with the problem.get_cost function
        return sample_many(
            lambda input: self._inner(*input),  # Need to unpack tuple into arguments
            lambda input, pi: self.problem.get_costs(input[0], pi),  # Don't need embeddings as input to get_costs
            (input, self.embedder(self._init_embed(input))[0]),  # Pack input with embeddings (additional input)
            batch_rep, iter_rep
        )

    def _select_node(self, probs, mask):

        assert (probs == probs).all(), "Probs should not contain any nans"

        if self.decode_type == "greedy":
            _, selected = probs.max(1)
            assert not mask.gather(1, selected.unsqueeze(
                -1)).data.any(), "Decode greedy: infeasible action has maximum probability"

        elif self.decode_type == "sampling":
            selected = probs.multinomial(1).squeeze(1)

            # Check if sampling went OK, can go wrong due to bug on GPU
            # See https://discuss.pytorch.org/t/bad-behavior-of-multinomial-function/10232
            while mask.gather(1, selected.unsqueeze(-1)).data.any():
                print('Sampled bad values, resampling!')
                selected = probs.multinomial(1).squeeze(1)

        else:
            assert False, "Unknown decode type"

        return selected

    def _precompute(self, embeddings, num_steps=1):

        # The fixed context projection of the graph embedding is calculated only once for efficiency
        graph_embed = embeddings.mean(1)
        # fixed context = (batch_size, 1, embed_dim) to make broadcastable with parallel timesteps
        fixed_context = self.project_fixed_context(graph_embed)[:, None, :]

        # The projection of the node embeddings for the attention is calculated once up front
        glimpse_key_fixed, glimpse_val_fixed, logit_key_fixed = \
            self.project_node_embeddings(embeddings[:, None, :, :]).chunk(3, dim=-1)

        # No need to rearrange the key for the logit as there is a single head
        fixed_attention_node_data = (
            self._make_heads(glimpse_key_fixed, num_steps),
            self._make_heads(glimpse_val_fixed, num_steps),
            logit_key_fixed.contiguous()
        )
        return AttentionModelFixed(embeddings, fixed_context, *fixed_attention_node_data)

    def _get_log_p_topk(self, fixed, state, k=None, normalize=True):
        log_p, _ = self._get_log_p(fixed, state, normalize=normalize)

        # Return topk
        if k is not None and k < log_p.size(-1):
            return log_p.topk(k, -1)

        # Return all; note that unlike torch.topk this does not give an error if there are fewer than k elements along the dim
        return (
            log_p,
            torch.arange(log_p.size(-1), device=log_p.device, dtype=torch.int64).repeat(log_p.size(0), 1)[:, None, :]
        )

    def _get_log_p(self, fixed, state, normalize=True):

        # Compute query = context node embedding
        query = fixed.context_node_projected + \
                self.project_step_context(self._get_parallel_step_context(fixed.node_embeddings, state))

        # Compute keys and values for the nodes
        glimpse_K, glimpse_V, logit_K = self._get_attention_node_data(fixed, state)

        # Compute the mask
        mask = state.get_mask()

        # Compute logits (unnormalized log_p)
        log_p, glimpse = self._one_to_many_logits(query, glimpse_K, glimpse_V, logit_K, mask)

        if normalize:
            log_p = torch.log_softmax(log_p / self.temp, dim=-1)

        assert not torch.isnan(log_p).any()

        return log_p, mask

    def _get_parallel_step_context(self, embeddings, state, from_depot=False):
        """
        Returns the context per step, optionally for multiple steps at once (for efficient evaluation of the model)

        :param embeddings: (batch_size, graph_size, embed_dim)
        :param prev_a: (batch_size, num_steps)
        :param first_a: Only used when num_steps = 1, action of first step or None if first step
        :return: (batch_size, num_steps, context_dim)
        """

        current_node = state.get_current_node()
        batch_size, num_steps = current_node.size()

        if self.is_vrp:
            # Embedding of previous node + remaining capacity
            if from_depot:
                # 1st dimension is node idx, but we do not squeeze it since we want to insert step dimension
                # i.e. we actually want embeddings[:, 0, :][:, None, :] which is equivalent
                return torch.cat(
                    (
                        embeddings[:, 0:1, :].expand(batch_size, num_steps, embeddings.size(-1)),
                        # used capacity is 0 after visiting depot
                        self.problem.VEHICLE_CAPACITY - torch.zeros_like(state.used_capacity[:, :, None])
                    ),
                    -1
                )
            else:
                return torch.cat(
                    (
                        torch.gather(
                            embeddings,
                            1,
                            current_node.contiguous()
                                .view(batch_size, num_steps, 1)
                                .expand(batch_size, num_steps, embeddings.size(-1))
                        ).view(batch_size, num_steps, embeddings.size(-1)),
                        self.problem.VEHICLE_CAPACITY - state.used_capacity[:, :, None]
                    ),
                    -1
                )
        elif self.is_orienteering or self.is_pctsp:
            return torch.cat(
                (
                    torch.gather(
                        embeddings,
                        1,
                        current_node.contiguous()
                            .view(batch_size, num_steps, 1)
                            .expand(batch_size, num_steps, embeddings.size(-1))
                    ).view(batch_size, num_steps, embeddings.size(-1)),
                    (
                        state.get_remaining_length()[:, :, None]
                        if self.is_orienteering
                        else state.get_remaining_prize_to_collect()[:, :, None]
                    )
                ),
                -1
            )
        else:  # TSP
            if num_steps == 1:  # We need to special case if we have only 1 step, which may or may not be the first
                if state.i.item() == 0:
                    # First and only step, ignore prev_a (this is a placeholder)
                    return self.W_placeholder[None, None, :].expand(batch_size, 1, self.W_placeholder.size(-1))
                else:
                    return embeddings.gather(
                        1,
                        torch.cat((state.first_a, current_node), 1)[:, :, None].expand(batch_size, 2, embeddings.size(-1))
                    ).view(batch_size, 1, -1)
            # More than one step, assume always starting with first
            embeddings_per_step = embeddings.gather(
                1,
                current_node[:, 1:, None].expand(batch_size, num_steps - 1, embeddings.size(-1))
            )
            return torch.cat((
                # First step placeholder, cat in dim 1 (time steps)
                self.W_placeholder[None, None, :].expand(batch_size, 1, self.W_placeholder.size(-1)),
                # Second step, concatenate embedding of first with embedding of current/previous (in dim 2, context dim)
                torch.cat((
                    embeddings_per_step[:, 0:1, :].expand(batch_size, num_steps - 1, embeddings.size(-1)),
                    embeddings_per_step
                ), 2)
            ), 1)

    def _one_to_many_logits(self, query, glimpse_K, glimpse_V, logit_K, mask):

        batch_size, num_steps, embed_dim = query.size()
        key_size = val_size = embed_dim // self.n_heads

        # Compute the glimpse, rearranging dimensions to (n_heads, batch_size, num_steps, 1, key_size)
        glimpse_Q = query.view(batch_size, num_steps, self.n_heads, 1, key_size).permute(2, 0, 1, 3, 4)

        # Batch matrix multiplication to compute compatibilities (n_heads, batch_size, num_steps, 1, graph_size)
        compatibility = torch.matmul(glimpse_Q, glimpse_K.transpose(-2, -1)) / math.sqrt(glimpse_Q.size(-1))
        if self.mask_inner:
            assert self.mask_logits, "Cannot mask inner without masking logits"
            compatibility[mask[None, :, :, None, :].expand_as(compatibility)] = -math.inf

        # Batch matrix multiplication to compute heads (n_heads, batch_size, num_steps, 1, val_size)
        heads = torch.matmul(torch.softmax(compatibility, dim=-1), glimpse_V)

        # Project to get the glimpse/updated context node embedding (batch_size, num_steps, 1, embedding_dim)
        glimpse = self.project_out(
            heads.permute(1, 2, 3, 0, 4).contiguous().view(-1, num_steps, 1, self.n_heads * val_size))

        # Projecting the glimpse again is not needed since this can be absorbed into project_out
        # final_Q = self.project_glimpse(glimpse)
        final_Q = glimpse
        # Batch matrix multiplication to compute logits (batch_size, num_steps, graph_size)
        # logits = 'compatibility'
        logits = torch.matmul(final_Q, logit_K.transpose(-2, -1)).squeeze(-2) / math.sqrt(final_Q.size(-1))

        # From the logits compute the probabilities by clipping, masking and softmax
        if self.tanh_clipping > 0:
            logits = torch.tanh(logits) * self.tanh_clipping
        if self.mask_logits:
            logits[mask] = -math.inf

        return logits, glimpse.squeeze(-2)
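
    # Shape walkthrough for _one_to_many_logits (a sketch of the tensor flow above, with
    # key_size = val_size = embed_dim // n_heads):
    #     glimpse_Q:     (n_heads, batch, num_steps, 1, key_size)
    #     glimpse_K:     (n_heads, batch, num_steps, graph_size, key_size)
    #     compatibility: Q @ K^T / sqrt(key_size)       -> (n_heads, batch, num_steps, 1, graph_size)
    #     heads:         softmax(compatibility) @ V     -> (n_heads, batch, num_steps, 1, val_size)
    #     glimpse:       project_out(concat of heads)   -> (batch, num_steps, 1, embed_dim)
    #     logits:        glimpse @ logit_K^T / sqrt(embed_dim), then optional tanh clipping and masking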
    def _get_attention_node_data(self, fixed, state):

        if self.is_vrp and self.allow_partial:

            # Need to provide information of how much each node has already been served
            # Clone demands as they are needed by the backprop whereas they are updated later
            glimpse_key_step, glimpse_val_step, logit_key_step = \
                self.project_node_step(state.demands_with_depot[:, :, :, None].clone()).chunk(3, dim=-1)

            # Projection of concatenation is equivalent to addition of projections but this is more efficient
            return (
                fixed.glimpse_key + self._make_heads(glimpse_key_step),
                fixed.glimpse_val + self._make_heads(glimpse_val_step),
                fixed.logit_key + logit_key_step,
            )

        # TSP or VRP without split delivery
        return fixed.glimpse_key, fixed.glimpse_val, fixed.logit_key

    def _make_heads(self, v, num_steps=None):
        assert num_steps is None or v.size(1) == 1 or v.size(1) == num_steps

        return (
            v.contiguous().view(v.size(0), v.size(1), v.size(2), self.n_heads, -1)
            .expand(v.size(0), v.size(1) if num_steps is None else num_steps, v.size(2), self.n_heads, -1)
            .permute(3, 0, 1, 2, 4)  # (n_heads, batch_size, num_steps, graph_size, head_dim)
        )
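

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): builds the model for TSP and runs
    # only the encoder on random coordinates. `_TSPStub` is a hypothetical stand-in exposing just
    # the NAME attribute used in __init__; full decoding (forward/_inner) additionally requires
    # the real problem classes from this repository, which provide make_state and get_costs.
    class _TSPStub:
        NAME = 'tsp'

    model = AttentionModel(embedding_dim=128, hidden_dim=128, problem=_TSPStub())
    set_decode_type(model, 'greedy')

    coords = torch.rand(2, 20, 2)  # (batch_size, graph_size, 2) node coordinates in the unit square
    embeddings, _ = model.embedder(model._init_embed(coords))
    print(embeddings.shape)  # expected: torch.Size([2, 20, 128])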