run.py

#!/usr/bin/env python
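"""Entry point for training routing models (AttentionModel or PointerNetwork)
with REINFORCE, using the baseline selected via options.py."""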
import os
import json
import pprint as pp

import torch
import torch.optim as optim
from tensorboard_logger import Logger as TbLogger

from nets.critic_network import CriticNetwork
from options import get_options
from train import train_epoch, validate, get_inner_model
from reinforce_baselines import NoBaseline, ExponentialBaseline, CriticBaseline, RolloutBaseline, WarmupBaseline
from nets.attention_model import AttentionModel
from nets.pointer_network import PointerNetwork, CriticNetworkLSTM
from utils import torch_load_cpu, load_problem
def run(opts):

    # Pretty print the run args
    pp.pprint(vars(opts))

    # Set the random seed
    torch.manual_seed(opts.seed)

    # Optionally configure tensorboard
    tb_logger = None
    if not opts.no_tensorboard:
        tb_logger = TbLogger(os.path.join(opts.log_dir, "{}_{}".format(opts.problem, opts.graph_size), opts.run_name))

    os.makedirs(opts.save_dir)

    # Save arguments so the exact configuration can always be found
    with open(os.path.join(opts.save_dir, "args.json"), 'w') as f:
        json.dump(vars(opts), f, indent=True)

    # Set the device
    opts.device = torch.device("cuda:0" if opts.use_cuda else "cpu")

    # Figure out which problem we are solving
    problem = load_problem(opts.problem)

    # Load data from load_path (or from the resume checkpoint)
    load_data = {}
    assert opts.load_path is None or opts.resume is None, "Only one of load path and resume can be given"
    load_path = opts.load_path if opts.load_path is not None else opts.resume
    if load_path is not None:
        print('  [*] Loading data from {}'.format(load_path))
        load_data = torch_load_cpu(load_path)

    # Initialize model
    model_class = {
        'attention': AttentionModel,
        'pointer': PointerNetwork
    }.get(opts.model, None)
    assert model_class is not None, "Unknown model: {}".format(opts.model)
    model = model_class(
        opts.embedding_dim,
        opts.hidden_dim,
        problem,
        n_encode_layers=opts.n_encode_layers,
        mask_inner=True,
        mask_logits=True,
        normalization=opts.normalization,
        tanh_clipping=opts.tanh_clipping,
        checkpoint_encoder=opts.checkpoint_encoder,
        shrink_size=opts.shrink_size
    ).to(opts.device)

    if opts.use_cuda and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # Overwrite model parameters by parameters to load
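    # (merging into the current state_dict keeps freshly initialized values for any keys absent from the checkpoint)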
    model_ = get_inner_model(model)
    model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})

    # Initialize baseline
    if opts.baseline == 'exponential':
        baseline = ExponentialBaseline(opts.exp_beta)
    elif opts.baseline == 'critic' or opts.baseline == 'critic_lstm':
        assert problem.NAME == 'tsp', "Critic only supported for TSP"
        baseline = CriticBaseline(
            (
                CriticNetworkLSTM(
                    2,
                    opts.embedding_dim,
                    opts.hidden_dim,
                    opts.n_encode_layers,
                    opts.tanh_clipping
                )
                if opts.baseline == 'critic_lstm'
                else
                CriticNetwork(
                    2,
                    opts.embedding_dim,
                    opts.hidden_dim,
                    opts.n_encode_layers,
                    opts.normalization
                )
            ).to(opts.device)
        )
    elif opts.baseline == 'rollout':
        baseline = RolloutBaseline(model, problem, opts)
    else:
        assert opts.baseline is None, "Unknown baseline: {}".format(opts.baseline)
        baseline = NoBaseline()
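
    # Optionally wrap the chosen baseline so the first bl_warmup_epochs epochs
    # are warmed up with an exponential baseline (using exp_beta)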
    if opts.bl_warmup_epochs > 0:
        baseline = WarmupBaseline(baseline, opts.bl_warmup_epochs, warmup_exp_beta=opts.exp_beta)

    # Load baseline from data, make sure script is called with same type of baseline
    if 'baseline' in load_data:
        baseline.load_state_dict(load_data['baseline'])

    # Initialize optimizer
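    # (the baseline's learnable parameters, if any, get their own learning rate lr_critic)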
    optimizer = optim.Adam(
        [{'params': model.parameters(), 'lr': opts.lr_model}]
        + (
            [{'params': baseline.get_learnable_parameters(), 'lr': opts.lr_critic}]
            if len(baseline.get_learnable_parameters()) > 0
            else []
        )
    )

    # Load optimizer state
    if 'optimizer' in load_data:
        optimizer.load_state_dict(load_data['optimizer'])
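        # torch_load_cpu maps all tensors to CPU, so move the loaded optimizer state back to the training device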
        for state in optimizer.state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.to(opts.device)

    # Initialize learning rate scheduler, decay by lr_decay once per epoch
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: opts.lr_decay ** epoch)

    # Make the validation dataset before starting the actual training loop
    val_dataset = problem.make_dataset(
        size=opts.graph_size, num_samples=opts.val_size, filename=opts.val_dataset, distribution=opts.data_distribution)

    if opts.resume:
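        # The resume checkpoint is expected to be named like 'epoch-<n>.pt', so recover the epoch from the filename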
        epoch_resume = int(os.path.splitext(os.path.split(opts.resume)[-1])[0].split("-")[1])

        # Restore the random states
        torch.set_rng_state(load_data['rng_state'])
        if opts.use_cuda:
            torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])

        # Dumping of state was done before the epoch callback, so do that now (the model is loaded)
        baseline.epoch_callback(model, epoch_resume)

        print("Resuming after {}".format(epoch_resume))
        opts.epoch_start = epoch_resume + 1
    if opts.eval_only:
        validate(model, val_dataset, opts)
    else:
        for epoch in range(opts.epoch_start, opts.epoch_start + opts.n_epochs):
            train_epoch(
                model,
                optimizer,
                baseline,
                lr_scheduler,
                epoch,
                val_dataset,
                problem,
                tb_logger,
                opts
            )


if __name__ == "__main__":
    run(get_options())
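
# Example invocation (flag names correspond to the opts fields used above; the values are illustrative only):
#   python run.py --problem tsp --graph_size 20 --baseline rollout --run_name tsp20_rollout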