rmv_discount_local.py 7.1 KB

  1. """
  2. This is the second ablation study, of removing the discount factor during testing.
  3. """
  4. import numpy as np
  5. import pandas as pd
  6. import tensorflow as tf
  7. from keras.layers import Dense
  8. from sklearn.preprocessing import MinMaxScaler
  9. import dagshub
  10. import mlflow
  11. import mlflow.keras
  12. import pickle
  13. mlflow.set_tracking_uri('https://dagshub.com/ML-Purdue/hackathonf23-Stacks.mlflow')
  14. dagshub.init(repo_owner='ML-Purdue', repo_name='hackathonf23-Stacks', mlflow=True)
  15. def get_or_create_experiment_id(name):
  16. exp = mlflow.get_experiment_by_name(name)
  17. if exp is None:
  18. exp_id = mlflow.create_experiment(name)
  19. return exp_id
  20. return exp.experiment_id
  21. class MaxEntIRL:
  22. def __init__(self, state_dim):
  23. self.state_dim = state_dim
  24. self.model = self._create_irl_model()
  25. def _create_irl_model(self):
  26. model = tf.keras.Sequential([
  27. Dense(self.state_dim, input_shape=(self.state_dim,), activation='relu'),
  28. Dense(4096, activation='relu'),
  29. Dense(2048, activation='relu'),
  30. Dense(self.state_dim, activation='linear')
  31. ])
  32. return model
  33. def generateHumanTrajectories(self, num_trajectories, trajectory_length):
  34. human_trajectories = []
  35. for _ in range(num_trajectories):
  36. trajectory = []
  37. state = np.zeros(self.state_dim)
  38. for _ in range(trajectory_length):
  39. direction_probabilities = self._generate_direction_probabilities() # Get direction probabilities
  40. action_coefficients = np.random.choice([-1, 0, 1], p=direction_probabilities)
  41. action = action_coefficients * 0.1
  42. new_state = state + action
  43. trajectory.append((state, action))
  44. state = new_state
  45. human_trajectories.append(trajectory)
  46. return human_trajectories
  47. def _generate_direction_probabilities(self):
  48. probabilities = np.random.dirichlet(np.ones(self.state_dim) * 0.1)
  49. return probabilities
  50. def loadDataset(self, file_path):
  51. data = pd.read_csv(file_path) # Load CSV data
  52. scaler = MinMaxScaler()
  53. columns_to_normalize = ['position x [mm]', 'position y [mm]', 'position z (height) [mm]', 'velocity [mm/s]']
  54. data[columns_to_normalize] = scaler.fit_transform(data[columns_to_normalize])
  55. return data
  56. def train_irl_with_dataset(self, data, lr=0.001, epochs=3):
  57. state_dim = self.state_dim
  58. optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
  59. positions = data[['position x [mm]', 'position y [mm]', 'position z (height) [mm]']].values
  60. velocities = data['velocity [mm/s]'].values
  61. mlflow.tensorflow.autolog()
  62. with mlflow.start_run(experiment_id=get_or_create_experiment_id("Ablation Study 2: Removed Discount Factor")):
  63. for epoch in range(epochs):
  64. total_loss = 0
  65. state_frequencies = self._calculate_state_frequencies(positions)
  66. for idx in range(len(positions)):
  67. state = positions[idx]
  68. velocity = velocities[idx]
  69. with tf.GradientTape() as tape:
  70. preferences = self.model(state[np.newaxis, :])
  71. prob_human = tf.nn.softmax(preferences)
  72. # Define losses excluding the discount factor
  73. alignment_loss = -tf.reduce_sum(state_frequencies * tf.math.log(prob_human + 1e-8), axis=1)
  74. # Compute the gradients
  75. grads = tape.gradient(alignment_loss, self.model.trainable_variables)
  76. optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
  77. total_loss += tf.reduce_sum(alignment_loss) # Accumulate the total loss
  78. avg_loss = total_loss / len(positions)
  79. mlflow.log_metric(f"loss", avg_loss, step=epoch)
  80. print(f"Epoch {epoch + 1}/{epochs}, IRL Loss without Discount Factor: {avg_loss}")
  81. def train_irl(self, human_trajectories=None, data=None, use_dataset=False, lr=0.001, epochs=3):
  82. if use_dataset and data is not None:
  83. # Train using the loaded dataset
  84. self.train_irl_with_dataset(data, lr=lr, epochs=epochs)
  85. else:
  86. # Train using the generative function
  87. if human_trajectories is None:
  88. human_trajectories = self.generateHumanTrajectories(num_trajectories, trajectory_length)
  89. self._train_irl_generative(human_trajectories, lr=lr, epochs=epochs)
  90. def _train_irl_generative(self, human_trajectories, lr=0.001, epochs=3):
  91. trajectory_length = len(human_trajectories[0])
  92. optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
  93. for epoch in range(epochs):
  94. total_loss = 0
  95. state_frequencies = self._calculate_state_frequencies(human_trajectories, trajectory_length)
  96. for trajectory in human_trajectories:
  97. for state, _ in trajectory:
  98. with tf.GradientTape() as tape:
  99. preferences = self.model(state[np.newaxis, :])
  100. prob_human = tf.nn.softmax(preferences)
  101. # Inside the training loop:
  102. max_entropy_loss = -tf.reduce_sum(prob_human * tf.math.log(prob_human + 1e-8), axis=1)
  103. alignment_loss = -tf.reduce_sum(state_frequencies * tf.math.log(prob_human + 1e-8), axis=1)
  104. maxent_irl_objective = max_entropy_loss + alignment_loss
  105. grads = tape.gradient(maxent_irl_objective, self.model.trainable_variables)
  106. optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
  107. total_loss += maxent_irl_objective
  108. avg_loss = total_loss / (len(human_trajectories) * trajectory_length)
  109. print(f"Epoch {epoch + 1}/{epochs}, MaxEnt IRL Loss: {avg_loss}")
  110. def _calculate_state_frequencies(self, positions):
  111. state_counts = np.sum(positions, axis=0)
  112. state_frequencies = state_counts / (len(positions) * self.state_dim)
  113. return state_frequencies
  114. def save_model(self, file_path):
  115. model_config = self.model.get_config()
  116. with open(file_path, 'wb') as f:
  117. pickle.dump(model_config, f)
  118. @classmethod
  119. def load_model(cls, file_path, state_dim):
  120. with open(file_path, 'rb') as f:
  121. model_config = pickle.load(f)
  122. irl_instance = cls(state_dim)
  123. irl_instance.model = tf.keras.Sequential.from_config(model_config)
  124. return irl_instance
  125. # Indicate test completion status
  126. state_dim = 3 # Dimension of the state space
  127. irl = MaxEntIRL(state_dim)
  128. num_trajectories = 100
  129. trajectory_length = 20
  130. # Load the dataset
  131. file_path = '/Users/vinay/Desktop/Computer_Science_Projects/ReScience/hackathonf23-Stacks/data/train.csv' # Replace with the actual file path
  132. data = irl.loadDataset(file_path)
  133. irl.train_irl(data=data, use_dataset=True, lr=0.001, epochs=3)
  134. irl.save_model('/Users/vinay/Desktop/Computer_Science_Projects/ReScience/hackathonf23-Stacks/models/rmv_discount_model.pkl')
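
For reference, a minimal sketch of reloading the pickled architecture and scoring one state, run in the same session after the script above (so MaxEntIRL, np, and tf are already in scope). Note that save_model stores only the Keras config, not the trained weights, so the reloaded network is freshly initialized; the sample state vector below is a made-up, already-normalized [x, y, z] position, not data from the repository.

# Rebuild the network from the pickled config (weights are re-initialized, not restored).
reloaded = MaxEntIRL.load_model(
    '/Users/vinay/Desktop/Computer_Science_Projects/ReScience/hackathonf23-Stacks/models/rmv_discount_model.pkl',
    state_dim=3,
)

# Hypothetical, already min-max-normalized [x, y, z] position.
sample_state = np.array([[0.2, 0.5, 0.1]], dtype=np.float32)
preferences = reloaded.model(sample_state)      # raw per-dimension preference scores
print(tf.nn.softmax(preferences).numpy())       # the probabilities used in the training loss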