tree_gam_minimal.py
from copy import deepcopy

import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils import check_array
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_X_y, check_is_fitted, _check_sample_weight

import imodels
class TreeGAMMinimal(BaseEstimator):
    """Tree-based GAM classifier.

    Uses cyclical boosting to fit a GAM with small trees.
    Simplified version of the explainable boosting machine described in
    https://github.com/interpretml/interpret
    Only works for binary classification.
    Fits a scalar bias to the mean.
    """
    def __init__(
        self,
        n_boosting_rounds=100,
        max_leaf_nodes=3,
        learning_rate: float = 0.01,
        boosting_strategy="cyclic",
        validation_frac=0.15,
        random_state=None,
    ):
        """
        Params
        ------
        n_boosting_rounds : int
            Number of boosting rounds for the cyclic boosting.
        max_leaf_nodes : int
            Maximum number of leaf nodes for the trees in the cyclic boosting.
        learning_rate : float
            Learning rate for the cyclic boosting.
        boosting_strategy : str ["cyclic", "greedy"]
            Whether to use cyclic boosting (cycle over features) or greedy
            boosting (select the best feature at each step).
        validation_frac : float
            Fraction of data to use for early stopping.
        random_state : int
            Random seed.
        """
        self.n_boosting_rounds = n_boosting_rounds
        self.max_leaf_nodes = max_leaf_nodes
        self.learning_rate = learning_rate
        self.boosting_strategy = boosting_strategy
        self.validation_frac = validation_frac
        self.random_state = random_state
    def fit(self, X, y, sample_weight=None):
        X, y = check_X_y(X, y, accept_sparse=False, multi_output=False)
        if isinstance(self, ClassifierMixin):
            check_classification_targets(y)
            self.classes_, y = np.unique(y, return_inverse=True)
        sample_weight = _check_sample_weight(sample_weight, X, dtype=None)

        # split into train and validation for early stopping
        (
            X_train,
            X_val,
            y_train,
            y_val,
            sample_weight_train,
            sample_weight_val,
        ) = train_test_split(
            X,
            y,
            sample_weight,
            test_size=self.validation_frac,
            random_state=self.random_state,
            stratify=y if isinstance(self, ClassifierMixin) else None,
        )

        self.estimators_ = []
        self.bias_ = np.mean(y)
        self._cyclic_boost(
            X_train,
            y_train,
            sample_weight_train,
            X_val,
            y_val,
            sample_weight_val,
        )
        self.mse_val_ = self._calc_mse(X_val, y_val, sample_weight_val)
        return self
    def _cyclic_boost(
        self, X_train, y_train, sample_weight_train, X_val, y_val, sample_weight_val
    ):
        """Apply cyclic boosting, storing trees in self.estimators_"""
        residuals_train = y_train - self.predict_proba(X_train)[:, 1]
        mse_val = self._calc_mse(X_val, y_val, sample_weight_val)
        for _ in range(self.n_boosting_rounds):
            boosting_round_ests = []
            boosting_round_mses = []
            feature_nums = np.arange(X_train.shape[1])
            for feature_num in feature_nums:
                # mask out every other feature so each tree fits exactly one
                X_ = np.zeros_like(X_train)
                X_[:, feature_num] = X_train[:, feature_num]
                est = DecisionTreeRegressor(
                    max_leaf_nodes=self.max_leaf_nodes,
                    random_state=self.random_state,
                )
                est.fit(X_, residuals_train, sample_weight=sample_weight_train)
                # skip trees whose root split is not on the target feature
                # (tree_.feature is -2 at a leaf)
                successfully_split_on_feature = np.all(
                    (est.tree_.feature[0] == feature_num)
                    | (est.tree_.feature[0] == -2)
                )
                if not successfully_split_on_feature:
                    continue
                self.estimators_.append(est)
                residuals_train_new = (
                    residuals_train - self.learning_rate * est.predict(X_train)
                )
                if self.boosting_strategy == "cyclic":
                    residuals_train = residuals_train_new
                elif self.boosting_strategy == "greedy":
                    mse_train_new = self._calc_mse(
                        X_train, y_train, sample_weight_train
                    )
                    # don't add each estimator for greedy; pop it and keep
                    # only the best one at the end of the round
                    boosting_round_ests.append(deepcopy(self.estimators_.pop()))
                    boosting_round_mses.append(mse_train_new)

            if self.boosting_strategy == "greedy":
                best_est = boosting_round_ests[np.argmin(boosting_round_mses)]
                self.estimators_.append(best_est)
                residuals_train = (
                    residuals_train - self.learning_rate * best_est.predict(X_train)
                )

            # early stopping if validation error does not decrease
            mse_val_new = self._calc_mse(X_val, y_val, sample_weight_val)
            if mse_val_new >= mse_val:
                return
            else:
                mse_val = mse_val_new
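    # Note: per round, "cyclic" keeps one tree per feature, while "greedy"
    # fits one candidate tree per feature but keeps only the single best,
    # so greedy adds far fewer trees per boosting round.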
    def predict_proba(self, X):
        X = check_array(X, accept_sparse=False, dtype=None)
        check_is_fitted(self)
        # additive prediction: bias plus the scaled output of every tree
        probs1 = np.ones(X.shape[0]) * self.bias_
        for est in self.estimators_:
            probs1 += self.learning_rate * est.predict(X)
        probs1 = np.clip(probs1, a_min=0, a_max=1)
        return np.array([1 - probs1, probs1]).T
    def predict(self, X):
        if isinstance(self, RegressorMixin):
            return self.predict_proba(X)[:, 1]
        elif isinstance(self, ClassifierMixin):
            return np.argmax(self.predict_proba(X), axis=1)

    def _calc_mse(self, X, y, sample_weight=None):
        return np.average(
            np.square(y - self.predict_proba(X)[:, 1]),
            weights=sample_weight,
        )
class TreeGAMMinimalRegressor(TreeGAMMinimal, RegressorMixin):
    ...


class TreeGAMMinimalClassifier(TreeGAMMinimal, ClassifierMixin):
    ...
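# The mixin picks the predict behavior: the regressor returns the raw
# clipped score, while the classifier returns the argmax over the two
# class columns (see TreeGAMMinimal.predict above).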
if __name__ == "__main__":
    X, y, feature_names = imodels.get_clean_dataset("heart")
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    gam = TreeGAMMinimalClassifier(
        boosting_strategy="cyclic",
        random_state=42,
        learning_rate=0.1,
        max_leaf_nodes=3,
        n_boosting_rounds=100,
    )
    gam.fit(X_train, y_train)

    # check roc auc score on the held-out test set
    y_pred = gam.predict_proba(X_test)[:, 1]
    print(f"test roc: {roc_auc_score(y_test, y_pred):.3f}")
    print(f"test acc {accuracy_score(y_test, gam.predict(X_test)):.3f}")
    print(f"\t(imb: {np.mean(y_test).round(3)})")
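To see what the model has learned, one can group the fitted single-feature trees by their root feature and sum their contributions over a value grid, recovering EBM-style shape functions. The helper below is a hypothetical sketch, not part of the file above; it assumes a fitted model `gam` and a feature matrix `X` as produced by the demo.

import numpy as np

def shape_function(gam, X, feature_num, n_grid=100):
    """Hypothetical helper: evaluate one feature's learned contribution.

    Assumes gam is a fitted TreeGAMMinimal* model. Because every tree in
    gam.estimators_ was trained on an array that is zero outside its own
    feature column, predicting on a grid that is zero elsewhere isolates
    that feature's shape function.
    """
    grid = np.linspace(X[:, feature_num].min(), X[:, feature_num].max(), n_grid)
    X_grid = np.zeros((n_grid, X.shape[1]))
    X_grid[:, feature_num] = grid
    contribution = np.zeros(n_grid)
    for est in gam.estimators_:
        # tree_.feature[0] is the root split feature (-2 would mean a leaf)
        if est.tree_.feature[0] == feature_num:
            contribution += gam.learning_rate * est.predict(X_grid)
    return grid, contribution

# usage, e.g.: grid, contrib = shape_function(gam, X_test, feature_num=0)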