tao.py

import random
import warnings
from collections import deque
from copy import deepcopy

import numpy as np
from mlxtend.classifier import LogisticRegression
from sklearn import datasets
from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin
from sklearn.linear_model import LinearRegression
from sklearn.metrics import get_scorer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_text
from sklearn.utils import check_X_y

from imodels.util.arguments import check_fit_arguments


class TaoTree(BaseEstimator):

    def __init__(self, model_type: str = 'CART',
                 n_iters: int = 20,
                 model_args: dict = {'max_leaf_nodes': 15},
                 randomize_tree=False,
                 update_scoring='accuracy',
                 min_node_samples_tao=3,
                 min_leaf_samples_tao=2,
                 node_model='stump',
                 node_model_args: dict = {},
                 reg_param: float = 1e-3,
                 weight_errors: bool = False,
                 verbose: int = 0,
                 ):
        """TAO: Alternating optimization of decision trees, with application to learning sparse oblique trees (NeurIPS 2018)
        https://proceedings.neurips.cc/paper/2018/hash/185c29dc24325934ee377cfda20e414c-Abstract.html
        Note: this implementation learns single-feature splits rather than oblique trees.

        Currently supports
        - given a CART tree, posthoc improve it with TAO
        - also works with HSTreeCV

        Todo
        - update bottom to top, otherwise input points don't get updated
        - update leaf nodes
        - support regression
        - support FIGS
        - support error-weighting
        - support oblique trees
        - support generic models at decision nodes
        - support pruning (e.g. if weights -> 0, then remove a node)
        - support classifiers in leaves

        Parameters
        ----------
        model_type: str
            'CART' or 'FIGS'
        n_iters: int
            Number of iterations to run TAO
        model_args: dict
            Arguments to pass to the underlying tree model
        randomize_tree: bool
            Whether to randomize the tree before running TAO
        update_scoring: str
            Name of the sklearn scorer used to accept or reject node updates
        min_node_samples_tao: int
            Minimum number of samples in a node to apply TAO
        min_leaf_samples_tao: int
            Minimum number of samples in a leaf to update its value (regression only)
        node_model: str
            'stump' or 'linear'
        node_model_args: dict
            Arguments to pass to the node-wise model
        reg_param: float
            Regularization parameter for node-wise linear model (if node_model is 'linear')
        weight_errors: bool
            Whether to weight each sample by how much routing it left vs. right changes its error
        verbose: int
            Verbosity level
        """
        super().__init__()
        self.model_type = model_type
        self.n_iters = n_iters
        self.model_args = model_args
        self.randomize_tree = randomize_tree
        self.update_scoring = update_scoring
        self.min_node_samples_tao = min_node_samples_tao
        self.min_leaf_samples_tao = min_leaf_samples_tao
        self.node_model = node_model
        self.node_model_args = node_model_args
        self.reg_param = reg_param
        self.weight_errors = weight_errors
        self.verbose = verbose
        self._init_prediction_task()  # decides between regressor and classifier

    def _init_prediction_task(self):
        """
        TaoTreeRegressor and TaoTreeClassifier override this method
        to alter the prediction task. When using this class directly,
        it is equivalent to TaoTreeClassifier
        """
        self.prediction_task = 'classification'

    def fit(self, X, y=None, feature_names=None, sample_weight=None):
        """
        Params
        ------
        sample_weight: array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Splits that would create child nodes with net zero or negative weight
            are ignored while searching for a split in each node.
        """
        X, y, feature_names = check_fit_arguments(self, X, y, feature_names)

        if isinstance(self, RegressorMixin):
            warnings.warn('TAO regression is not yet tested')
        X, y = check_X_y(X, y)
        y = y.astype(float)
        if feature_names is not None:
            self.feature_names_ = feature_names

        if self.model_type == 'CART':
            if isinstance(self, ClassifierMixin):
                self.model = DecisionTreeClassifier(**self.model_args)
            elif isinstance(self, RegressorMixin):
                self.model = DecisionTreeRegressor(**self.model_args)
            self.model.fit(X, y, sample_weight=sample_weight)

            if self.verbose:
                print(export_text(self.model))

            if self.randomize_tree:
                np.random.shuffle(self.model.tree_.feature)  # shuffle the split features
                for i in range(self.model.tree_.node_count):  # split on feature medians
                    self.model.tree_.threshold[i] = np.median(
                        X[:, self.model.tree_.feature[i]])

        if self.verbose:
            print('starting score', self.model.score(X, y))
        for i in range(self.n_iters):
            num_updates = self._tao_iter_cart(X, y, self.model.tree_, sample_weight=sample_weight)
            if num_updates == 0:  # stop once an iteration changes nothing
                break

        return self
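
    # A note on the update below (editor's sketch, paraphrasing the paper and the
    # code in _tao_iter_cart): each iteration visits nodes bottom-up. For an
    # internal node, every sample reaching it gets a binary pseudo-label: 0 if
    # routing it to the left subtree yields the smaller error, 1 if routing it
    # right does. Samples whose error is identical either way are dropped, the
    # split is re-fit as a one-feature classifier on these pseudo-labels, and the
    # new (feature, threshold) pair is kept only if the whole-tree score improves.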
    def _tao_iter_cart(self, X, y, tree, X_score=None, y_score=None, sample_weight=None):
        """Updates the tree by applying one iteration of the TAO algorithm

        Params
        ------
        X: array-like of shape (n_samples, n_features)
            The input samples.
        y: array-like of shape (n_samples,)
            The target values.
        tree: DecisionTreeClassifier.tree_ or DecisionTreeRegressor.tree_
            The tree to be post-hoc improved
        """
        # Tree properties
        children_left = tree.children_left
        children_right = tree.children_right
        feature = tree.feature
        threshold = tree.threshold
        value = tree.value

        # For each node, store the path to that node #######################################################
        indexes_with_prefix_paths = []  # data structure with (index, path_to_node_index)
        # e.g. if node 3 is the left child of node 1, which is the right child of node 0,
        # then we get (3, [(0, 'R'), (1, 'L')])

        # start from the root node (id 0) with an empty path
        queue = deque()
        queue.append((0, []))
        while len(queue) > 0:
            node_id, path_to_node_index = queue.popleft()
            indexes_with_prefix_paths.append((node_id, path_to_node_index))

            # If a split node, append the left and right children to the queue
            if children_left[node_id] != children_right[node_id]:
                queue.append((children_left[node_id], path_to_node_index + [(node_id, 'L')]))
                queue.append((children_right[node_id], path_to_node_index + [(node_id, 'R')]))

        num_updates = 0

        # Reversing the BFS order visits nodes bottom -> top, one level at a time
        for (node_id, path_to_node_index) in reversed(indexes_with_prefix_paths):
            # For each node, try a TAO update

            # Compute the points reaching this node ######################################
            def filter_points_by_path(X, y, path_to_node_index):
                """Returns the points in X that reach the node along this path"""
                for node_id, direction in path_to_node_index:
                    idxs = X[:, feature[node_id]] <= threshold[node_id]
                    if direction == 'R':
                        idxs = ~idxs
                    X = X[idxs]
                    y = y[idxs]
                return X, y

            X_node, y_node = filter_points_by_path(X, y, path_to_node_index)
            if sample_weight is not None:
                sample_weight_node = filter_points_by_path(X, sample_weight, path_to_node_index)[1]
            else:
                sample_weight_node = np.ones(y_node.size)

            # Skip leaf nodes and nodes with too few samples ######################################
            if children_left[node_id] == children_right[node_id]:  # is leaf node
                if isinstance(self, RegressorMixin) and X_node.shape[0] >= self.min_leaf_samples_tao:
                    # for regression, reset the leaf value to the mean of the points now reaching it
                    value[node_id] = np.mean(y_node)
                continue
            elif X_node.shape[0] < self.min_node_samples_tao:
                continue

            # Compute the outputs for these points if they go left or right ######################################
            def predict_from_node(X, node_id):
                """Returns predictions for X starting at node node_id"""

                def predict_one(x, node_id):
                    """Returns the prediction for a single sample x starting at node node_id"""
                    if children_left[node_id] == children_right[node_id]:  # leaf
                        if isinstance(self, RegressorMixin):
                            return value[node_id]
                        if isinstance(self, ClassifierMixin):
                            return np.argmax(value[node_id])  # note: value stores counts for each class
                    if x[feature[node_id]] <= threshold[node_id]:
                        return predict_one(x, children_left[node_id])
                    else:
                        return predict_one(x, children_right[node_id])

                preds = np.zeros(X.shape[0])
                for i in range(X.shape[0]):
                    preds[i] = predict_one(X[i], node_id)
                return preds

            y_node_left = predict_from_node(X_node, children_left[node_id])
            y_node_right = predict_from_node(X_node, children_right[node_id])
            if node_id == 0:  # root node
                assert np.all(np.logical_or(self.model.predict(X_node) == y_node_left,
                                            self.model.predict(X_node) == y_node_right)), \
                    'actual predictions should match either predict_from_node left or right'

            # Decide on the prediction target: go left (0) or right (1), whichever is advantageous
            # (the TAO paper binarizes these, i.e. the target is 0 or 1 depending on which side is better)
            y_node_absolute_errors = np.abs(np.vstack((y_node - y_node_left,
                                                       y_node - y_node_right))).T

            # screen out indexes where going left/right has no effect
            idxs_relevant = y_node_absolute_errors[:, 0] != y_node_absolute_errors[:, 1]
            if idxs_relevant.sum() <= 1:  # nothing to change
                if self.verbose:
                    print('no errors to change')
                continue

            y_node_target = np.argmin(y_node_absolute_errors, axis=1)
            y_node_target = y_node_target[idxs_relevant]

            # optionally weight the errors by their size
            # (making this work for classification would require switching to predict_proba)
            if self.weight_errors:
                sample_weight_node *= np.abs(y_node_absolute_errors[:, 1] - y_node_absolute_errors[:, 0])
            sample_weight_node_target = sample_weight_node[idxs_relevant]
            X_node = X_node[idxs_relevant]

            # Fit a 1-variable binary classification model on these outputs ######################################
            # Note: this could be customized (e.g. for sparse oblique trees)
            best_score = -np.inf
            best_feat_num = None
            for feat_num in range(X.shape[1]):
                if isinstance(self, ClassifierMixin):
                    if self.node_model == 'linear':
                        m = LogisticRegression(**self.node_model_args)
                    elif self.node_model == 'stump':
                        m = DecisionTreeClassifier(max_depth=1, **self.node_model_args)
                if isinstance(self, RegressorMixin):
                    if self.node_model == 'linear':
                        m = LinearRegression(**self.node_model_args)
                    elif self.node_model == 'stump':
                        m = DecisionTreeRegressor(max_depth=1, **self.node_model_args)

                X_node_single_feat = X_node[:, feat_num: feat_num + 1]
                m.fit(X_node_single_feat, y_node_target, sample_weight=sample_weight_node_target)
                score = m.score(X_node_single_feat, y_node_target, sample_weight=sample_weight_node_target)
                if score > best_score:
                    best_score = score
                    best_feat_num = feat_num
                    best_model = deepcopy(m)
                    if self.node_model == 'linear':
                        # a 1-d linear decision boundary w * x + b = 0 sits at x = -b / w
                        best_threshold = -best_model.intercept_ / best_model.coef_[0]
                    elif self.node_model == 'stump':
                        best_threshold = best_model.tree_.threshold[0]

            # Update the node with the new feature / threshold ######################################
            old_feat_num = feature[node_id]
            old_threshold = threshold[node_id]

            if X_score is None:
                X_score = X
            if y_score is None:
                y_score = y
            scorer = get_scorer(self.update_scoring)

            old_score = scorer(self.model, X_score, y_score)
            feature[node_id] = best_feat_num
            threshold[node_id] = best_threshold
            new_score = scorer(self.model, X_score, y_score)

            if self.verbose > 1:
                if old_score == new_score:
                    print('\tno change', best_feat_num, old_feat_num)
                print(f'\tscore_total {old_score:0.4f} -> {new_score:0.4f}')

            if old_score >= new_score:
                # revert the update unless it strictly improved the whole-tree score
                feature[node_id] = old_feat_num
                threshold[node_id] = old_threshold
            else:
                num_updates += 1  # track whether any updates were accepted
                if self.verbose > 0:
                    print(f'Improved score from {old_score:0.4f} to {new_score:0.4f}')

        return num_updates
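
    # Cost note (editor's estimate from the loop above): one TAO iteration fits
    # a one-feature node model for every (internal node, feature) pair and scores
    # the full tree twice per candidate update, so it costs roughly
    # O(n_internal_nodes * n_features * node_model_fit) plus the full-tree scorings.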
    def predict(self, X):
        return self.model.predict(X)

    def predict_proba(self, X):
        return self.model.predict_proba(X)

    def score(self, X, y):
        return self.model.score(X, y)


class TaoTreeRegressor(TaoTree, RegressorMixin):
    pass


class TaoTreeClassifier(TaoTree, ClassifierMixin):
    pass
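

# Editor's usage sketch (illustrative, not from the original file): since these
# estimators follow the sklearn API via BaseEstimator and the mixins, they can
# be dropped into standard sklearn tooling, e.g.
#   from sklearn.model_selection import cross_val_score
#   scores = cross_val_score(TaoTreeClassifier(), X, y, cv=5)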


if __name__ == '__main__':
    np.random.seed(13)
    random.seed(13)
    X, y = datasets.load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=42
    )
    print('X.shape', X.shape)
    print('ys', np.unique(y_train), '\n\n')
    m = TaoTreeClassifier(randomize_tree=False, weight_errors=False,
                          node_model='stump', model_args={'max_depth': 3},
                          verbose=1)
    m.fit(X_train, y_train)
    print('Train acc', np.mean(m.predict(X_train) == y_train))
    print('Test acc', np.mean(m.predict(X_test) == y_test))
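
    # Illustrative comparison (editor's addition, not in the original script):
    # fit plain CART at the same depth to see the gain from the TAO updates.
    cart = DecisionTreeClassifier(max_depth=3).fit(X_train, y_train)
    print('CART test acc', np.mean(cart.predict(X_test) == y_test))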

    # Commented-out regression demo (TAO regression is not yet tested):
    # X, y = datasets.load_diabetes(return_X_y=True)  # regression
    # X = np.random.randn(500, 10)
    # y = (X[:, 0] > 0).astype(float) + (X[:, 1] > 1).astype(float)
    # X_train, X_test, y_train, y_test = train_test_split(
    #     X, y, test_size=0.33, random_state=42
    # )
    # m = TaoTreeRegressor()
    # m.fit(X_train, y_train)
    # print('mse', np.mean(np.square(m.predict(X_test) - y_test)),
    #       'baseline', np.mean(np.square(y_test)))