reproduced_model.py

  1. """
  2. This is the main file for the reproduced model, with no ablation performed.
  3. """
  4. import numpy as np
  5. import pandas as pd
  6. import tensorflow as tf
  7. from keras.layers import Dense
  8. from sklearn.preprocessing import MinMaxScaler
  9. import dagshub
  10. import mlflow
  11. import pickle
  12. mlflow.set_tracking_uri('https://dagshub.com/ML-Purdue/hackathonf23-Stacks.mlflow')
  13. dagshub.init(repo_owner='ML-Purdue', repo_name='hackathonf23-Stacks', mlflow=True)
def get_or_create_experiment_id(name):
    """Return the MLflow experiment id for `name`, creating the experiment if it does not exist."""
    exp = mlflow.get_experiment_by_name(name)
    if exp is None:
        exp_id = mlflow.create_experiment(name)
        return exp_id
    return exp.experiment_id

class MaxEntIRL:
    def __init__(self, state_dim):
        self.state_dim = state_dim
        self.model = self._create_irl_model()

    def _create_irl_model(self):
        model = tf.keras.Sequential([
            Dense(self.state_dim, input_shape=(self.state_dim,), activation='relu'),
            Dense(4096, activation='relu'),
            Dense(2048, activation='relu'),
            Dense(self.state_dim, activation='linear')
        ])
        return model

    def generateHumanTrajectories(self, num_trajectories, trajectory_length):
        human_trajectories = []
        for _ in range(num_trajectories):
            trajectory = []
            state = np.zeros(self.state_dim)
            for _ in range(trajectory_length):
                direction_probabilities = self._generate_direction_probabilities()  # Get direction probabilities
                action_coefficients = np.random.choice([-1, 0, 1], p=direction_probabilities)
                action = action_coefficients * 0.1
                new_state = state + action
                trajectory.append((state, action))
                state = new_state
            human_trajectories.append(trajectory)
        return human_trajectories

    def _generate_direction_probabilities(self):
        probabilities = np.random.dirichlet(np.ones(self.state_dim) * 0.1)
        return probabilities

    def loadDataset(self, file_path):
        data = pd.read_csv(file_path)  # Load CSV data
        scaler = MinMaxScaler()
        columns_to_normalize = ['position x [mm]', 'position y [mm]', 'position z (height) [mm]', 'velocity [mm/s]']
        data[columns_to_normalize] = scaler.fit_transform(data[columns_to_normalize])
        return data

    def train_irl_with_dataset(self, data, lr=0.001, epochs=3):
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
        # Extract relevant columns from the loaded dataset
        positions = data[['position x [mm]', 'position y [mm]', 'position z (height) [mm]']].values
        velocities = data['velocity [mm/s]'].values
        mlflow.tensorflow.autolog()
        with mlflow.start_run(experiment_id=get_or_create_experiment_id("Base Reproduction")):
            for epoch in range(epochs):
                total_loss = 0
                state_frequencies = self._calculate_state_frequencies(positions)
                for idx in range(len(positions)):
                    state = positions[idx]
                    velocity = velocities[idx]
                    with tf.GradientTape() as tape:
                        preferences = self.model(state[np.newaxis, :])
                        prob_human = tf.nn.softmax(preferences)
                        # Define losses
                        max_entropy_loss = -tf.reduce_sum(prob_human * tf.math.log(prob_human + 1e-8), axis=1)
                        alignment_loss = -tf.reduce_sum(state_frequencies * tf.math.log(prob_human + 1e-8), axis=1)
                        maxent_irl_objective = max_entropy_loss + alignment_loss
                    # Compute the gradients and update the reward model
                    grads = tape.gradient(maxent_irl_objective, self.model.trainable_variables)
                    optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
                    total_loss += tf.reduce_sum(maxent_irl_objective)  # Accumulate the total loss
                avg_loss = total_loss / len(positions)
                mlflow.log_metric("loss", float(avg_loss), step=epoch)
                print(f"Epoch {epoch + 1}/{epochs}, MaxEnt IRL Loss: {avg_loss}")

    def train_irl(self, human_trajectories=None, data=None, use_dataset=False, lr=0.001, epochs=3,
                  num_trajectories=100, trajectory_length=20):
        if use_dataset and data is not None:
            # Train using the loaded dataset
            self.train_irl_with_dataset(data, lr=lr, epochs=epochs)
        else:
            # Train using the generative function
            if human_trajectories is None:
                human_trajectories = self.generateHumanTrajectories(num_trajectories, trajectory_length)
            self._train_irl_generative(human_trajectories, lr=lr, epochs=epochs)

    def _train_irl_generative(self, human_trajectories, lr=0.001, epochs=3):
        trajectory_length = len(human_trajectories[0])
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
        # Stack every visited state so empirical state frequencies can be computed
        all_states = np.array([state for trajectory in human_trajectories for state, _ in trajectory])
        for epoch in range(epochs):
            total_loss = 0
            state_frequencies = self._calculate_state_frequencies(all_states)
            for trajectory in human_trajectories:
                for state, _ in trajectory:
                    with tf.GradientTape() as tape:
                        preferences = self.model(state[np.newaxis, :])
                        prob_human = tf.nn.softmax(preferences)
                        # Same objective as the dataset path: entropy term plus alignment term
                        max_entropy_loss = -tf.reduce_sum(prob_human * tf.math.log(prob_human + 1e-8), axis=1)
                        alignment_loss = -tf.reduce_sum(state_frequencies * tf.math.log(prob_human + 1e-8), axis=1)
                        maxent_irl_objective = max_entropy_loss + alignment_loss
                    grads = tape.gradient(maxent_irl_objective, self.model.trainable_variables)
                    optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
                    total_loss += tf.reduce_sum(maxent_irl_objective)
            avg_loss = total_loss / (len(human_trajectories) * trajectory_length)
            print(f"Epoch {epoch + 1}/{epochs}, MaxEnt IRL Loss: {avg_loss}")

    def _calculate_state_frequencies(self, positions):
        state_counts = np.sum(positions, axis=0)
        state_frequencies = state_counts / (len(positions) * self.state_dim)
        return state_frequencies

    def save_model(self, file_path):
        model_config = self.model.get_config()
        with open(file_path, 'wb') as f:
            pickle.dump(model_config, f)

    @classmethod
    def load_model(cls, file_path, state_dim):
        with open(file_path, 'rb') as f:
            model_config = pickle.load(f)
        irl_instance = cls(state_dim)
        irl_instance.model = tf.keras.Sequential.from_config(model_config)
        return irl_instance

# Run the base reproduction end to end
state_dim = 3  # Dimension of the state space
irl = MaxEntIRL(state_dim)
num_trajectories = 100
trajectory_length = 20

# Load the dataset
file_path = '/Users/vinay/Desktop/Computer_Science_Projects/ReScience/hackathonf23-Stacks/data/train.csv'  # Replace with the actual file path
data = irl.loadDataset(file_path)
irl.train_irl(data=data, use_dataset=True, lr=0.001, epochs=3)
irl.save_model('/Users/vinay/Desktop/Computer_Science_Projects/ReScience/hackathonf23-Stacks/models/reproduced_model.pkl')
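
# --- Minimal sketch (not part of the original script): reload the saved model ---
# This assumes the same pickle path used by irl.save_model() above. Note that
# save_model() stores only the Keras architecture config, so the reloaded network
# comes back with freshly initialized weights; keeping trained parameters would
# additionally require something like self.model.save_weights().
reloaded = MaxEntIRL.load_model(
    '/Users/vinay/Desktop/Computer_Science_Projects/ReScience/hackathonf23-Stacks/models/reproduced_model.pkl',
    state_dim
)
reloaded.model.summary()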