dqn_learn_ex.py

  1. """
  2. This file is copied/apdated from https://github.com/berkeleydeeprlcourse/homework/tree/master/hw3
  3. """
  4. import sys
  5. import pickle
  6. import numpy as np
  7. from collections import namedtuple
  8. from itertools import count
  9. import random
  10. import gym.spaces
  11. import json
  12. import torch
  13. import torch.autograd as autograd
  14. from utils.replay_buffer import ReplayBuffer
  15. from utils.gym import get_wrapper_by_name
  16. USE_CUDA = torch.cuda.is_available()
  17. print("USE_CUDA=", USE_CUDA)
  18. dtype = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor
  19. longType = torch.cuda.LongTensor if USE_CUDA else torch.LongTensor
  20. class Variable(autograd.Variable):
  21. def __init__(self, data, *args, **kwargs):
  22. if USE_CUDA:
  23. data = data.cuda()
  24. super(Variable, self).__init__(data, *args, **kwargs)
  25. """
  26. OptimizerSpec containing following attributes
  27. constructor: The optimizer constructor ex: RMSprop
  28. kwargs: {Dict} arguments for constructing optimizer
  29. """
  30. OptimizerSpec = namedtuple("OptimizerSpec", ["constructor", "kwargs"])
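# Illustrative only (not part of the original file): a caller might build an
# OptimizerSpec like the commented example below. The hyperparameter values are
# placeholders; dqn_learn_ex only relies on constructor(params, **kwargs)
# returning a torch.optim optimizer.
#
#   optimizer_spec = OptimizerSpec(
#       constructor=torch.optim.RMSprop,
#       kwargs=dict(lr=0.00025, alpha=0.95, eps=0.01),
#   )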


def dqn_learn_ex(
    env,
    q_func,
    optimizer_spec,
    exploration,
    stopping_criterion=None,
    replay_buffer_size=1000000,
    batch_size=32,
    gamma=0.99,
    learning_starts=50000,
    learning_freq=4,
    frame_history_len=4,
    target_update_freq=10000,
    save_path=None,
    save_freq=100000,
    log_every_n_steps=3000,
    loss="bellman",
    **kwargs  # To avoid complaints about unknown keywords
):
    """
    Extension of dqn_learn.py.
    Moved extensions here in order to leave dqn_learn.py clean for grading.
    """
    assert type(env.observation_space) == gym.spaces.Box
    assert type(env.action_space) == gym.spaces.Discrete

    ###############
    # BUILD MODEL #
    ###############

    if len(env.observation_space.shape) == 1:
        # This means we are running on low-dimensional observations (e.g. RAM)
        input_arg = env.observation_space.shape[0]
    else:
        img_h, img_w, img_c = env.observation_space.shape
        input_arg = frame_history_len * img_c
    num_actions = env.action_space.n

    def to_pytorch(obs, type=dtype, normalize=True):
        t = torch.from_numpy(obs).type(type)
        if normalize:
            return t / 255.0
        else:
            return t

    def to_pytorch_var(x, grad=False, type=dtype, normalize=True):
        return Variable(to_pytorch(x, type=type, normalize=normalize), requires_grad=grad)

    # Construct an epsilon-greedy policy with the given exploration schedule
    def select_epsilon_greedy_action(model, obs, t):
        sample = random.random()
        eps_threshold = exploration.value(t)
        if sample > eps_threshold:
            obs = to_pytorch(obs).unsqueeze(0)
            # Use volatile=True if the variable is only used in inference mode, i.e. don't save the history
            return model(Variable(obs, volatile=True)).data.max(1)[1].cpu()
        else:
            return torch.IntTensor([[random.randrange(num_actions)]])
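    # Note on the exploration argument (illustrative sketch, not part of the original
    # file): the only interface used here is exploration.value(t), which should return
    # the epsilon for timestep t. A minimal linear-decay schedule satisfying that
    # contract could look like the commented class below.
    #
    #   class LinearSchedule:
    #       def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
    #           self.schedule_timesteps = schedule_timesteps
    #           self.final_p = final_p
    #           self.initial_p = initial_p
    #
    #       def value(self, t):
    #           fraction = min(float(t) / self.schedule_timesteps, 1.0)
    #           return self.initial_p + fraction * (self.final_p - self.initial_p)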

    # Initialize target q function and q function, i.e. build the model.
    ######
    # YOUR CODE HERE
    print("Input and output size of network:")
    print(input_arg, num_actions)
    Q = q_func(input_arg, num_actions)
    Q_target = q_func(input_arg, num_actions)
    bellman_l1_loss = torch.nn.SmoothL1Loss(size_average=False)
    if USE_CUDA:
        Q = Q.cuda()
        Q_target = Q_target.cuda()

    def update_q_target():
        print("Updating Q_target")
        Q_target.load_state_dict(Q.state_dict())
    ######

    # Construct Q network optimizer function
    optimizer = optimizer_spec.constructor(Q.parameters(), **optimizer_spec.kwargs)

    replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)
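    # The ReplayBuffer interface relied on below is store_frame, encode_recent_observation,
    # store_effect, can_sample, and sample; sample(batch_size) is expected to return
    # (obs, actions, rewards, next_obs, done_mask) as numpy arrays, as in the Berkeley hw3
    # utils/replay_buffer.py this file was adapted from.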

    start_step = 0
    start_episode = 0
    Statistic = {
        "episode_rewards": []
    }

    def update_stats(t, episode, reward):
        rewards = Statistic["episode_rewards"]
        if len(rewards) == 0 or \
                rewards[-1][1] < episode:
            stat_tuple = (t, episode, reward)
            # print("Updating stats with ", stat_tuple)
            rewards.append(stat_tuple)

    def get_mean_episode_rewards(range):
        return np.mean([r for (_, _, r) in Statistic["episode_rewards"][-range:]])

    if save_path is not None:
        try:
            print("Trying to load state from ", save_path)
            with open(save_path + ".Q.pkl", 'rb') as f:
                Q.load_state_dict(pickle.load(f))
            with open(save_path + ".stats.json", 'r') as f:
                saved_stats = json.load(f)
                Statistic = saved_stats["stats"]
                start_step = saved_stats["timestep"]
                start_episode = saved_stats["episode"]
        except Exception as e:
            print("Saved state doesn't exist yet (probably)")
            print(e)

    update_q_target()

    def save_state(t, episode):
        """
        Saves the current network weights, together with the current time step and statistics, for resuming later.
        """
        if save_path is not None:
            print("Saving state")
            with open(save_path + ".Q.pkl", 'wb') as f:
                pickle.dump(Q.state_dict(), f, pickle.HIGHEST_PROTOCOL)
            with open(save_path + ".stats.json", 'w') as f:
                saved_stats = {
                    "timestep": t,
                    "episode": episode,
                    "stats": Statistic
                }
                json.dump(saved_stats, f)

    ###############
    # RUN ENV     #
    ###############
    num_param_updates = 0
    last_obs = env.reset()

    for t in count(start=start_step):
        ### 1. Check stopping criterion
        if stopping_criterion is not None and stopping_criterion(env):
            break

        ### 2. Step the env and store the transition
        last_frame_idx = replay_buffer.store_frame(last_obs)
        enc_last_obs = replay_buffer.encode_recent_observation()
        action = select_epsilon_greedy_action(Q, enc_last_obs, t)
        new_frame, r, done, _ = env.step(action)
        replay_buffer.store_effect(last_frame_idx, action, r, done)
        if done:
            last_obs = env.reset()
        else:
            last_obs = new_frame
        #####

        ### 3. Perform experience replay and train the network.
        if (t > learning_starts and
                t % learning_freq == 0 and
                replay_buffer.can_sample(batch_size)):
            optimizer.zero_grad()
            obs_batch, act_batch, r_batch, next_obs_batch, done_mask = replay_buffer.sample(batch_size)
            Q_val_batch = Q(to_pytorch_var(obs_batch))
            Q_target_val_batch = Q_target(to_pytorch_var(next_obs_batch)).detach()
            act_batch_var = to_pytorch_var(act_batch, type=longType, normalize=False).unsqueeze(1)
            vals_of_actions_taken = Q_val_batch.gather(1, act_batch_var)
            Q_target_val_max, _ = Q_target_val_batch.max(1)
            Q_target_val_max = Q_target_val_max.unsqueeze(1)
            reverse_done_mask = 1 - to_pytorch_var(done_mask, normalize=False).unsqueeze(1)
            Q_target_masked_val_max = (reverse_done_mask * Q_target_val_max)
            Q_target_discounted = (gamma * Q_target_masked_val_max)
            r_batch_var = to_pytorch_var(r_batch, normalize=False).unsqueeze(1)
            Q_masked_target = r_batch_var + Q_target_discounted
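            # Q_masked_target is the standard one-step Bellman target,
            #   y = r + gamma * (1 - done) * max_a' Q_target(s', a'),
            # computed from the detached target network so no gradient flows into Q_target.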
            if loss == 'l1':
                bellman_l1_loss(vals_of_actions_taken, Q_masked_target).backward()
            else:
                bellman_error = Q_masked_target - vals_of_actions_taken
                clipped_error = bellman_error.clamp(-1, 1)
                vals_of_actions_taken.backward(-clipped_error)
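                # Backpropagating -clipped_error through vals_of_actions_taken supplies
                # d(loss)/d(Q(s, a)) directly: clamping the error to [-1, 1] gives the same
                # gradient as a Huber-style loss, without materialising a loss tensor.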
            optimizer.step()
            num_param_updates += 1
            if num_param_updates % target_update_freq == 0:
                update_q_target()

        ### 4. Log progress and keep track of statistics
        episode_rewards = get_wrapper_by_name(env, "Monitor").get_episode_rewards()
        episode_count = len(episode_rewards)
        total_episodes = start_episode + episode_count
        if episode_count > 0:
            update_stats(t, total_episodes, episode_rewards[-1])
        if t % log_every_n_steps == 0 and t > learning_starts and t > start_step:
            print("Timestep %d" % (t,))
            print("Episode %d" % (total_episodes,))
            print("mean reward (100 episodes) %f" % get_mean_episode_rewards(100))
            print("mean reward (10 episodes) %f" % get_mean_episode_rewards(10))
            print("exploration %f" % exploration.value(t))
            sys.stdout.flush()
        if t % save_freq == 0 and t > start_step:
            save_state(t, total_episodes)
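

# Illustrative call sketch (assumptions only, not part of the original file):
# dqn_learn_ex expects an already-wrapped gym env (including a "Monitor" wrapper,
# see get_wrapper_by_name above), a q_func(in_channels, num_actions) network
# constructor, an OptimizerSpec, and an exploration schedule exposing .value(t),
# e.g. the LinearSchedule sketched earlier. Something along these lines:
#
#   dqn_learn_ex(
#       env=env,                      # assumed to be built and wrapped elsewhere
#       q_func=q_func,                # assumed network constructor
#       optimizer_spec=OptimizerSpec(constructor=torch.optim.RMSprop,
#                                    kwargs=dict(lr=0.00025, alpha=0.95, eps=0.01)),
#       exploration=LinearSchedule(1000000, 0.1),
#       stopping_criterion=None,      # or a callable env -> bool that ends training
#       save_path="checkpoints/run1",
#   )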