double_dqn_learn.py

  1. """
  2. This file is copied/apdated from https://github.com/berkeleydeeprlcourse/homework/tree/master/hw3
  3. """
  4. import sys
  5. import pickle
  6. import numpy as np
  7. from collections import namedtuple
  8. from itertools import count
  9. import random
  10. import gym.spaces
  11. import json
  12. import torch
  13. import torch.autograd as autograd
  14. from utils.replay_buffer import ReplayBuffer
  15. from utils.gym import get_wrapper_by_name
  16. USE_CUDA = torch.cuda.is_available()
  17. print("USE_CUDA=", USE_CUDA)
  18. dtype = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor
  19. longType = torch.cuda.LongTensor if USE_CUDA else torch.LongTensor
  20. class Variable(autograd.Variable):
  21. def __init__(self, data, *args, **kwargs):
  22. if USE_CUDA:
  23. data = data.cuda()
  24. super(Variable, self).__init__(data, *args, **kwargs)
  25. """
  26. OptimizerSpec containing following attributes
  27. constructor: The optimizer constructor ex: RMSprop
  28. kwargs: {Dict} arguments for constructing optimizer
  29. """
  30. OptimizerSpec = namedtuple("OptimizerSpec", ["constructor", "kwargs"])
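
# Illustrative sketch (an assumption, not part of the original script): an OptimizerSpec
# could be built from any standard torch optimizer constructor, for example:
#   optimizer_spec = OptimizerSpec(
#       constructor=torch.optim.RMSprop,
#       kwargs=dict(lr=0.00025, alpha=0.95, eps=0.01),
#   )
# double_dqn_learning() below calls optimizer_spec.constructor(Q.parameters(), **optimizer_spec.kwargs).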


def double_dqn_learning(
        env,
        q_func,
        optimizer_spec,
        exploration,
        stopping_criterion=None,
        replay_buffer_size=1000000,
        batch_size=32,
        gamma=0.99,
        learning_starts=50000,
        learning_freq=4,
        frame_history_len=4,
        target_update_freq=10000,
        save_path=None,
        save_freq=100000,
        log_every_n_steps=3000,
        loss="bellman",
        **kwargs  # To avoid complaints of unknown keywords
):
    """
    Similar to dqn_learn.py, but implements double DQN learning instead of normal DQN:
    https://arxiv.org/abs/1509.06461.pdf
    """
    assert type(env.observation_space) == gym.spaces.Box
    assert type(env.action_space) == gym.spaces.Discrete

    ###############
    # BUILD MODEL #
    ###############

    if len(env.observation_space.shape) == 1:
        # This means we are running on low-dimensional observations (e.g. RAM)
        input_arg = env.observation_space.shape[0]
    else:
        img_h, img_w, img_c = env.observation_space.shape
        input_arg = frame_history_len * img_c
    num_actions = env.action_space.n

    def to_pytorch(obs, type=dtype, normalize=True):
        t = torch.from_numpy(obs).type(type)
        if normalize:
            return t / 255.0
        else:
            return t

    def to_pytorch_var(x, grad=False, type=dtype, normalize=True):
        return Variable(to_pytorch(x, type=type, normalize=normalize), requires_grad=grad)

    # Construct an epsilon greedy policy with the given exploration schedule
    def select_epsilon_greedy_action(model, obs, t):
        sample = random.random()
        eps_threshold = exploration.value(t)
        if sample > eps_threshold:
            obs = to_pytorch(obs).unsqueeze(0)
            # Use volatile=True if the variable is only used in inference mode, i.e. don't save the history
            return model(Variable(obs, volatile=True)).data.max(1)[1].cpu()
        else:
            return torch.IntTensor([[random.randrange(num_actions)]])
    # Initialize target q function and q function, i.e. build the model.
    ######
    # YOUR CODE HERE
    print("Input and output size of network:")
    print(input_arg, num_actions)
    Q = q_func(input_arg, num_actions)
    Q_target = q_func(input_arg, num_actions)
    bellman_l1_loss = torch.nn.SmoothL1Loss(size_average=False)
    if USE_CUDA:
        Q = Q.cuda()
        Q_target = Q_target.cuda()
    Q_target.load_state_dict(Q.state_dict())

    def switch_Q_functions():
        print("Switching Q functions")
        q_state_dict = Q.state_dict()
        q_target_state_dict = Q_target.state_dict()
        Q.load_state_dict(q_target_state_dict)
        Q_target.load_state_dict(q_state_dict)
    ######

    # Construct Q network optimizer function
    optimizer = optimizer_spec.constructor(Q.parameters(), **optimizer_spec.kwargs)

    replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)

    start_step = 0
    start_episode = 0
    Statistic = {
        "episode_rewards": []
    }

    def update_stats(t, episode, reward):
        rewards = Statistic["episode_rewards"]
        if len(rewards) == 0 or \
                rewards[-1][1] < episode:
            stat_tuple = (t, episode, reward)
            # print("Updating stats with ", stat_tuple)
            rewards.append(stat_tuple)

    def get_mean_episode_rewards(range):
        return np.mean([r for (_, _, r) in Statistic["episode_rewards"][-range:]])

    if save_path is not None:
        try:
            print("Trying to load state from ", save_path)
            with open(save_path + ".Q.pkl", 'rb') as f:
                Q.load_state_dict(pickle.load(f))
            with open(save_path + ".Q_target.pkl", 'rb') as f:
                Q_target.load_state_dict(pickle.load(f))
            with open(save_path + ".stats.json", 'r') as f:
                saved_stats = json.load(f)
                Statistic = saved_stats["stats"]
                start_step = saved_stats["timestep"]
                start_episode = saved_stats["episode"]
        except Exception as e:
            print("Saved state doesn't exist yet (probably)")
            print(e)

    def save_state(t, episode):
        """
        Saves the current stable network weights, together with the current time step and statistics, for resuming later.
        """
        if save_path is not None:
            print("Saving state")
            with open(save_path + ".Q.pkl", 'wb') as f:
                pickle.dump(Q.state_dict(), f, pickle.HIGHEST_PROTOCOL)
            with open(save_path + ".Q_target.pkl", 'wb') as f:
                pickle.dump(Q_target.state_dict(), f, pickle.HIGHEST_PROTOCOL)
            with open(save_path + ".stats.json", 'w') as f:
                saved_stats = {
                    "timestep": t,
                    "episode": episode,
                    "stats": Statistic
                }
                json.dump(saved_stats, f)

    ###############
    # RUN ENV     #
    ###############

    num_param_updates = 0
    last_obs = env.reset()

    for t in count(start=start_step):
        ### 1. Check stopping criterion
        if stopping_criterion is not None and stopping_criterion(env):
            break

        ### 2. Step the env and store the transition
        last_frame_idx = replay_buffer.store_frame(last_obs)
        enc_last_obs = replay_buffer.encode_recent_observation()
        action = select_epsilon_greedy_action(Q, enc_last_obs, t)
        new_frame, r, done, _ = env.step(action)
        replay_buffer.store_effect(last_frame_idx, action, r, done)
        if done:
            last_obs = env.reset()
        else:
            last_obs = new_frame
        #####

        ### 3. Perform experience replay and train the network.
        if (t > learning_starts and
                t % learning_freq == 0 and
                replay_buffer.can_sample(batch_size)):
            optimizer.zero_grad()
            obs_batch, act_batch, r_batch, next_obs_batch, done_mask = replay_buffer.sample(batch_size)
            Q_val_batch = Q(to_pytorch_var(obs_batch))
            Q_target_val_batch = Q_target(to_pytorch_var(next_obs_batch)).detach()
            # The following code will take only one cell from each vector of the Q_val_batch tensor.
            # Each vector corresponds to a single output of the Q net, and each cell corresponds to a single action.
            # This means we take only the cells of the actions that we actually took, since all others are irrelevant
            # when calculating the loss.
            act_batch_var = to_pytorch_var(act_batch, type=longType, normalize=False).unsqueeze(1)
            vals_of_actions_taken = Q_val_batch.gather(1, act_batch_var)
            # Here is the difference from normal DQN -
            # we select the maximal value action according to Q, and evaluate it according to Q_target
            Q_next_val_batch = Q(to_pytorch_var(next_obs_batch)).detach()
            _, Q_next_val_max_action = Q_next_val_batch.max(1)
            Q_next_val_max_action = Q_next_val_max_action.unsqueeze(1)
            Q_target_next_val_estimate = Q_target_val_batch.gather(1, Q_next_val_max_action)
            reverse_done_mask = 1 - to_pytorch_var(done_mask, normalize=False).unsqueeze(1)
            Q_target_masked_next_val_estimate = (reverse_done_mask * Q_target_next_val_estimate)
            Q_target_discounted = (gamma * Q_target_masked_next_val_estimate)
            r_batch_var = to_pytorch_var(r_batch, normalize=False).unsqueeze(1)
            Q_target_vals = r_batch_var + Q_target_discounted
            if loss == 'l1':
                bellman_l1_loss(vals_of_actions_taken, Q_target_vals).backward()
            else:
                bellman_error = Q_target_vals - vals_of_actions_taken
                clipped_error = bellman_error.clamp(-1, 1)
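                # Manual gradient step: for 0.5 * (target - Q)^2 the gradient w.r.t. Q is
                # -(target - Q) = -bellman_error, so passing -clipped_error into backward()
                # descends a clipped squared Bellman error without building a loss tensor explicitly.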
                vals_of_actions_taken.backward(-clipped_error)
            optimizer.step()
            num_param_updates += 1
            if num_param_updates % target_update_freq == 0:
                switch_Q_functions()

        ### 4. Log progress and keep track of statistics
        episode_rewards = get_wrapper_by_name(env, "Monitor").get_episode_rewards()
        episode_count = len(episode_rewards)
        total_episodes = start_episode + episode_count
        if episode_count > 0:
            update_stats(t, total_episodes, episode_rewards[-1])
        if t % log_every_n_steps == 0 and t > learning_starts and t > start_step:
            print("Timestep %d" % (t,))
            print("Episode %d" % (total_episodes,))
            print("mean reward (100 episodes) %f" % get_mean_episode_rewards(100))
            print("mean reward (10 episodes) %f" % get_mean_episode_rewards(10))
            print("exploration %f" % exploration.value(t))
            sys.stdout.flush()
        if t % save_freq == 0 and t > start_step:
            save_state(t, total_episodes)
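

# Minimal usage sketch (illustrative assumptions, not part of the original file): the environment,
# Q-network class, and exploration schedule names below are placeholders. The only requirements
# visible in this file are that `env` is wrapped by a gym "Monitor", `q_func(input_size, num_actions)`
# builds a network, and `exploration.value(t)` returns the epsilon for step t.
#
#   optimizer_spec = OptimizerSpec(constructor=torch.optim.RMSprop, kwargs=dict(lr=0.00025))
#   double_dqn_learning(
#       env=monitored_atari_env,               # hypothetical Monitor-wrapped env
#       q_func=MyDQNModel,                     # hypothetical nn.Module subclass
#       optimizer_spec=optimizer_spec,
#       exploration=linear_epsilon_schedule,   # hypothetical schedule with a .value(t) method
#       save_path="checkpoints/pong",
#   )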