# block_transformers_ys.py
from abc import ABC, abstractmethod
from collections import defaultdict

import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.ensemble import BaseEnsemble
from sklearn.ensemble._forest import (_generate_sample_indices,
                                      _generate_unsampled_indices)
from sklearn.pipeline import make_pipeline  # used in the reconstructed _make_base_transformer below
from sklearn.preprocessing import StandardScaler

from .local_stumps import make_stumps, tree_feature_transform
class BlockPartitionedData:
    """
    Abstraction for a feature matrix in which the columns are grouped into
    blocks.

    Parameters
    ----------
    data_blocks: list of ndarray
        Blocks of feature columns
    common_block: ndarray
        A set of feature columns that should be common to all blocks
    """

    def __init__(self, data_blocks, common_block=None):
        self.n_blocks = len(data_blocks)
        self.n_samples = data_blocks[0].shape[0]
        self._data_blocks = data_blocks
        self._common_block = common_block
        self._create_block_indices()
        self._means = [np.mean(data_block, axis=0)
                       for data_block in self._data_blocks]

    def get_all_data(self):
        """
        Returns
        -------
        all_data: ndarray
            The data matrix obtained by concatenating all feature blocks
            together
        """
        if self._common_block is None:
            all_data = np.hstack(self._data_blocks)
        else:
            # The common block is appended at the end
            all_data = np.hstack(self._data_blocks + [self._common_block])
        return all_data

    def _create_block_indices(self):
        self._block_indices_dict = {}
        start_index = 0
        for k in range(self.n_blocks):
            stop_index = start_index + self._data_blocks[k].shape[1]
            self._block_indices_dict[k] = list(range(start_index, stop_index))
            start_index = stop_index
        if self._common_block is None:
            self._common_block_indices = []
        else:
            stop_index = start_index + self._common_block.shape[1]
            self._common_block_indices = list(range(start_index, stop_index))
    def get_indices(self, k, all_except=False):
        """
        Parameters
        ----------
        k: int
            The index of the feature block desired
        all_except: bool
            If True, return the indices of all features except those in
            block k

        Returns
        -------
        block_indices: list of int
            The indices of the features in the desired block
        """
        if k not in self._block_indices_dict.keys():
            raise ValueError(f"{k} is not a block index.")
        if all_except:
            indices = []
            for block_no, block_indices in self._block_indices_dict.items():
                if block_no != k:
                    indices += block_indices
        else:
            indices = self._block_indices_dict[k]
        # Concatenate with + rather than += so the stored block index list is
        # not mutated when indices aliases it (latent bug in the original)
        indices = indices + self._common_block_indices
        return indices
    def get_blocks(self, k, all_except=False):
        """
        Parameters
        ----------
        k: int
            The index of the feature block desired
        all_except: bool
            If True, return all feature blocks except block k

        Returns
        -------
        block: ndarray
            The feature block desired
        """
        if k not in self._block_indices_dict.keys():
            raise ValueError(f"{k} is not a block index.")
        if all_except:
            blocks = []
            for block_no, block in enumerate(self._data_blocks):
                if block_no != k:
                    blocks.append(block)
        else:
            blocks = [self._data_blocks[k]]
        if self._common_block is not None:
            blocks.append(self._common_block)
        if len(blocks) > 1:
            # Stack the collected blocks; the original stacked
            # [common_block, block k] here, which ignored the all_except case
            # and put the common block first instead of last
            stacked_blocks = np.hstack(blocks)
        else:
            stacked_blocks = blocks[0]
        return stacked_blocks
    def get_modified_data(self, k, mode="keep_k"):
        """
        Modify the data by either imputing the mean of each feature in block k
        (keep_rest) or imputing the mean of each feature not in block k
        (keep_k). Return the full data matrix with the modified data.

        Parameters
        ----------
        k: int
            The index of the feature block not to modify
        mode: string in {"keep_k", "keep_rest"}
            Mode for the method. "keep_k" imputes the mean of each feature not
            in block k, "keep_rest" imputes the mean of each feature in
            block k

        Returns
        -------
        all_data: ndarray
            The data matrix obtained by concatenating all feature blocks
            together
        """
        modified_blocks = [np.outer(np.ones(self.n_samples), self._means[i])
                           for i in range(self.n_blocks)]
        if mode == "keep_k":
            data_blocks = \
                [self._data_blocks[i] if i == k else modified_blocks[i]
                 for i in range(self.n_blocks)]
        elif mode == "keep_rest":
            data_blocks = \
                [modified_blocks[i] if i == k else self._data_blocks[i]
                 for i in range(self.n_blocks)]
        else:
            raise ValueError("Unsupported mode.")
        if self._common_block is None:
            all_data = np.hstack(data_blocks)
        else:
            all_data = np.hstack(data_blocks + [self._common_block])
        return all_data

    def train_test_split(self, train_indices, test_indices):
        train_blocks = [self.get_blocks(k)[train_indices, :]
                        for k in range(self.n_blocks)]
        train_blocked_data = BlockPartitionedData(train_blocks)
        test_blocks = [self.get_blocks(k)[test_indices, :]
                       for k in range(self.n_blocks)]
        test_blocked_data = BlockPartitionedData(test_blocks)
        return train_blocked_data, test_blocked_data

    def __repr__(self):
        return self.get_all_data().__repr__()
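
# Illustrative usage sketch (made-up data, not from the original file): three
# blocks of two columns each, with get_modified_data imputing column means
# outside or inside the chosen block.
#
#   rng = np.random.default_rng(0)
#   bpd = BlockPartitionedData([rng.normal(size=(5, 2)) for _ in range(3)])
#   bpd.get_all_data().shape               # (5, 6)
#   bpd.get_modified_data(0, "keep_k")     # blocks 1 and 2 -> column means
#   bpd.get_modified_data(0, "keep_rest")  # block 0 -> column means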
class RFPlusFeatureMapping(ABC):
    """
    An interface for block transformers, objects that transform a data matrix
    into a BlockPartitionedData object comprising one block of engineered
    features for each original feature.
    """

    def __init__(self, estimator=None, additional_transformer="default",
                 drop_features=True, center=True, rescale=False):
        self.n_blocks = None
        self.estimator = estimator
        self.additional_transformer = additional_transformer
        self.drop_features = drop_features
        self._make_stumps()
        self._base_transformers = {}
        self.is_fitted = False
        self.center = center
        self.rescale = rescale

    def _make_stumps(self):
        if isinstance(self.estimator, BaseEnsemble):
            tree_models = self.estimator.estimators_
        else:
            tree_models = [self.estimator]
        # Make stumps for each tree
        all_stumps = []
        for tree_model in tree_models:
            tree_stumps = make_stumps(tree_model.tree_)
            all_stumps += tree_stumps
        # Identify the stumps that split on feature k, for each k
        self.stumps = defaultdict(list)
        for stump in all_stumps:
            self.stumps[stump.feature].append(stump)
        self.n_splits = {k: len(stumps) for k, stumps in self.stumps.items()}

    def fit(self, X):
        self.n_blocks = X.shape[1]
        for k in range(self.n_blocks):
            self._make_base_transformer(X, k)
        self.is_fitted = True
        return self

    def _make_base_transformer(self, X, k):
        if self.drop_features and len(self.stumps[k]) == 0:
            # No stumps split on feature k; as written, no entry is created
            # in self._base_transformers, so transform() assumes every
            # feature has at least one split
            return None
        if self.additional_transformer == "default":
            # The source file breaks off mid-identifier here ("= Stan"); a
            # StandardScaler driven by self.center and self.rescale is the
            # most plausible intent, mirroring _center_and_rescale below
            additional_transformer = StandardScaler(with_mean=self.center,
                                                    with_std=self.rescale)
        else:
            additional_transformer = self.additional_transformer
        # Reconstruction (assumed): transform() below expects a fitted
        # transformer in self._base_transformers[k], so chain the stump
        # features with the additional transformer and fit on X
        self._base_transformers[k] = make_pipeline(
            StumpTransformer(self.stumps[k]), additional_transformer).fit(X)

    def check_is_fitted(self):
        if not self.is_fitted:
            raise AttributeError("Transformer has not yet been fitted.")
    def transform(self, X):
        """
        Transform a data matrix into a BlockPartitionedData object comprising
        one block for each original feature in X. Centering and rescaling are
        configured via the constructor (center, rescale) rather than here.

        Parameters
        ----------
        X: ndarray
            The data matrix to be transformed

        Returns
        -------
        blocked_data: BlockPartitionedData object
            The transformed data
        """
        self.check_is_fitted()
        data_blocks = [self._base_transformers[k].transform(X)
                       for k in range(self.n_blocks)]
        blocked_data = BlockPartitionedData(data_blocks)
        return blocked_data
    def fit_transform(self, X):
        self.fit(X)
        return self.transform(X)

    def _center_and_rescale(self, data_block):
        # Incomplete in the source (it returned an undefined name without
        # applying the scaler); the plausible intent, reconstructed here, is
        # to center and/or rescale a transformed data block
        std_scaler = StandardScaler(with_mean=self.center,
                                    with_std=self.rescale)
        return std_scaler.fit_transform(data_block)
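
# Illustrative usage sketch (assumes a fitted scikit-learn tree and that
# make_stumps/tree_feature_transform from .local_stumps behave as used above;
# variable names here are hypothetical):
#
#   from sklearn.tree import DecisionTreeRegressor
#   tree = DecisionTreeRegressor(max_depth=3, random_state=0).fit(X, y)
#   mapping = RFPlusFeatureMapping(estimator=tree)
#   blocked = mapping.fit_transform(X)  # one engineered block per feature
#   blocked.get_indices(0)              # columns derived from feature 0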
class StumpTransformer(TransformerMixin, BaseEstimator):
    def __init__(self, stumps):
        self.stumps = stumps

    def fit(self, X, y=None):
        # Stateless: nothing to fit, but return self (rather than the
        # original bare pass) so the scikit-learn fit/transform chaining
        # convention holds
        return self

    def transform(self, X):
        return tree_feature_transform(self.stumps, X)
# NOTE: BlockTransformerBase is not defined in this file. The classes below
# follow its per-feature API (_fit_one_feature, _transform_one_feature,
# _centers, _scales, oob_seed), so it is presumably defined or imported
# elsewhere in this refactor.
class IdentityTransformer(BlockTransformerBase, ABC):
    """
    Block transformer that creates a block partitioned data object with each
    block k containing only the original feature k.
    """

    def _fit_one_feature(self, X, k):
        self._centers[k] = np.mean(X[:, [k]])
        self._scales[k] = np.std(X[:, [k]])

    def _transform_one_feature(self, X, k):
        return X[:, [k]]
class TreeTransformer(BlockTransformerBase, ABC):
    """
    A block transformer that transforms data using a representation built from
    local decision stumps from a tree or tree ensemble. The transformer also
    comes with metadata on the local decision stumps and methods that allow
    for transformations using sub-representations corresponding to each of the
    original features.

    Parameters
    ----------
    estimator: scikit-learn estimator
        The scikit-learn tree or tree ensemble estimator object.
    data: ndarray
        A data matrix that can be used to update the number of samples in each
        node of the tree(s) in the supplied estimator object. This affects
        the node values of the resulting engineered features.
    """

    def __init__(self, estimator, data=None):
        super().__init__()
        self.estimator = estimator
        self.oob_seed = self.estimator.random_state
        # Check if single tree or tree ensemble
        if isinstance(estimator, BaseEnsemble):
            tree_models = estimator.estimators_
            if data is not None:
                # If a data matrix is supplied, use it to update the number
                # of samples in each node
                for tree_model in tree_models:
                    _update_n_node_samples(tree_model, data)
        else:
            tree_models = [estimator]
        # Make stumps for each tree
        all_stumps = []
        for tree_model in tree_models:
            tree_stumps = make_stumps(tree_model.tree_)
            all_stumps += tree_stumps
        # Identify the stumps that split on feature k, for each k
        self.stumps = defaultdict(list)
        for stump in all_stumps:
            self.stumps[stump.feature].append(stump)
        self.n_splits = {k: len(stumps) for k, stumps in self.stumps.items()}

    def _fit_one_feature(self, X, k):
        stump_features = tree_feature_transform(self.stumps[k], X)
        self._centers[k] = np.mean(stump_features, axis=0)
        self._scales[k] = np.std(stump_features, axis=0)

    def _transform_one_feature(self, X, k):
        return tree_feature_transform(self.stumps[k], X)

    def _fit_transform_one_feature(self, X, k):
        stump_features = tree_feature_transform(self.stumps[k], X)
        self._centers[k] = np.mean(stump_features, axis=0)
        self._scales[k] = np.std(stump_features, axis=0)
        return stump_features
class CompositeTransformer(BlockTransformerBase, ABC):
    """
    A block transformer that is built by concatenating the blocks of the same
    index from a list of block transformers.

    Parameters
    ----------
    block_transformer_list: list of BlockTransformer objects
        The list of block transformers to combine
    rescale_mode: string in {"max", "mean", None}
        Flag for the type of rescaling to be done to the blocks from different
        base transformers. If "max", divide each block by the max std
        deviation of a column within the block. If "mean", divide each block
        by the mean std deviation of a column within the block. If None, do
        not rescale.
    drop_features: bool
        Flag for whether to return an empty block if that from the first
        transformer in the list is trivial.
    """

    def __init__(self, block_transformer_list, rescale_mode=None,
                 drop_features=True):
        super().__init__()
        self.block_transformer_list = block_transformer_list
        assert len(self.block_transformer_list) > 0, \
            "Need at least one base transformer."
        for transformer in block_transformer_list:
            if hasattr(transformer, "oob_seed") and \
                    transformer.oob_seed is not None:
                self.oob_seed = transformer.oob_seed
                break
        self.rescale_mode = rescale_mode
        self.drop_features = drop_features
        self._rescale_factors = {}
        self._trivial_block_indices = {}

    def _fit_one_feature(self, X, k):
        data_blocks = []
        centers = []
        scales = []
        for block_transformer in self.block_transformer_list:
            data_block = block_transformer.fit_transform_one_feature(
                X, k, center=False, normalize=False)
            data_blocks.append(data_block)
            centers.append(block_transformer._centers[k])
            scales.append(block_transformer._scales[k])
        # Handle trivial blocks
        self._trivial_block_indices[k] = \
            [idx for idx, data_block in enumerate(data_blocks)
             if _empty_or_constant(data_block)]
        if (0 in self._trivial_block_indices[k] and self.drop_features) or \
                (len(self._trivial_block_indices[k]) == len(data_blocks)):
            # If the first block is trivial and self.drop_features is True,
            # record placeholder centers and scales for the empty block
            self._centers[k] = np.array([0])
            self._scales[k] = np.array([1])
            return
        else:
            # Remove trivial blocks
            for idx in reversed(self._trivial_block_indices[k]):
                data_blocks.pop(idx)
                centers.pop(idx)
                scales.pop(idx)
        self._centers[k] = np.hstack(centers)
        self._scales[k] = np.hstack(scales)
        self._rescale_factors[k] = _get_rescale_factors(data_blocks,
                                                        self.rescale_mode)

    def _transform_one_feature(self, X, k):
        data_blocks = []
        for block_transformer in self.block_transformer_list:
            data_block = block_transformer.transform_one_feature(
                X, k, center=False, normalize=False)
            data_blocks.append(data_block)
        # Handle trivial blocks
        if (0 in self._trivial_block_indices[k] and self.drop_features) or \
                (len(self._trivial_block_indices[k]) == len(data_blocks)):
            # If the first block is trivial and self.drop_features is True,
            # return an empty block
            return np.empty((X.shape[0], 0))
        else:
            # Remove trivial blocks
            for idx in reversed(self._trivial_block_indices[k]):
                data_blocks.pop(idx)
        composite_block = np.hstack(
            [data_block / scale_factor for data_block, scale_factor in
             zip(data_blocks, self._rescale_factors[k])]
        )
        return composite_block

    def _fit_transform_one_feature(self, X, k):
        data_blocks = []
        centers = []
        scales = []
        for block_transformer in self.block_transformer_list:
            data_block = block_transformer.fit_transform_one_feature(
                X, k, center=False, normalize=False)
            data_blocks.append(data_block)
            centers.append(block_transformer._centers[k])
            scales.append(block_transformer._scales[k])
        # Handle trivial blocks
        self._trivial_block_indices[k] = \
            [idx for idx, data_block in enumerate(data_blocks)
             if _empty_or_constant(data_block)]
        if (0 in self._trivial_block_indices[k] and self.drop_features) or \
                (len(self._trivial_block_indices[k]) == len(data_blocks)):
            # If the first block is trivial and self.drop_features is True,
            # return an empty block
            self._centers[k] = np.array([0])
            self._scales[k] = np.array([1])
            return np.empty((X.shape[0], 0))
        else:
            # Remove trivial blocks
            for idx in reversed(self._trivial_block_indices[k]):
                data_blocks.pop(idx)
                centers.pop(idx)
                scales.pop(idx)
        self._centers[k] = np.hstack(centers)
        self._scales[k] = np.hstack(scales)
        self._rescale_factors[k] = _get_rescale_factors(data_blocks,
                                                        self.rescale_mode)
        composite_block = np.hstack(
            [data_block / scale_factor for data_block, scale_factor in
             zip(data_blocks, self._rescale_factors[k])]
        )
        return composite_block
class GmdiDefaultTransformer(CompositeTransformer, ABC):
    """
    Default block transformer used in GMDI. For each original feature, this
    forms a block comprising the local decision stumps, from a single tree
    model, that split on the feature, and appends the original feature.

    Parameters
    ----------
    tree_model: scikit-learn estimator
        The scikit-learn tree estimator object.
    rescale_mode: string in {"max", "mean", None}
        Flag for the type of rescaling to be done to the blocks from different
        base transformers. If "max", divide each block by the max std
        deviation of a column within the block. If "mean", divide each block
        by the mean std deviation of a column within the block. If None, do
        not rescale.
    drop_features: bool
        Flag for whether to return an empty block if that from the first
        transformer in the list is trivial.
    """

    def __init__(self, tree_model, rescale_mode="max", drop_features=True):
        super().__init__([TreeTransformer(tree_model), IdentityTransformer()],
                         rescale_mode, drop_features)
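
# Illustrative usage sketch (note that BlockTransformerBase and the
# *_one_feature API are defined outside this file, so this assumes that
# interface; variable names are hypothetical):
#
#   from sklearn.tree import DecisionTreeRegressor
#   tree = DecisionTreeRegressor(max_depth=3, random_state=0).fit(X, y)
#   gmdi_transformer = GmdiDefaultTransformer(tree)
#   # For each feature k, block k stacks the local stumps that split on k
#   # with the raw feature k itself, rescaled according to rescale_mode="max"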
def _update_n_node_samples(tree, X):
    node_indicators = tree.decision_path(X)
    new_n_node_samples = node_indicators.getnnz(axis=0)
    for i in range(len(new_n_node_samples)):
        tree.tree_.n_node_samples[i] = new_n_node_samples[i]


def _get_rescale_factors(data_blocks, rescale_mode):
    if rescale_mode == "max":
        scale_factors = np.array([max(data_block.std(axis=0))
                                  for data_block in data_blocks])
    elif rescale_mode == "mean":
        scale_factors = np.array([np.mean(data_block.std(axis=0))
                                  for data_block in data_blocks])
    elif rescale_mode is None:
        scale_factors = np.ones(len(data_blocks))
    else:
        raise ValueError("Invalid rescale mode.")
    scale_factors = scale_factors / scale_factors[0]
    return scale_factors
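
# Worked example for _get_rescale_factors: two blocks whose column standard
# deviations are [1.0, 3.0] and [2.0, 2.0].
#   "max"  -> raw factors [3.0, 2.0]; normalized by the first: [1.0, 2/3]
#   "mean" -> raw factors [2.0, 2.0]; normalized by the first: [1.0, 1.0]
# Dividing each block by its factor puts the blocks on a common scale
# relative to the first (reference) block.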
def _empty_or_constant(data_block):
    return data_block.shape[1] == 0 or max(data_block.std(axis=0)) == 0


def _blocked_train_test_split(blocked_data, y, oob_seed):
    n_samples = len(y)
    train_indices = _generate_sample_indices(oob_seed, n_samples, n_samples)
    test_indices = _generate_unsampled_indices(oob_seed, n_samples, n_samples)
    train_blocked_data, test_blocked_data = \
        blocked_data.train_test_split(train_indices, test_indices)
    if y.ndim > 1:
        y_train = y[train_indices, :]
        y_test = y[test_indices, :]
    else:
        y_train = y[train_indices]
        y_test = y[test_indices]
    return (train_blocked_data, test_blocked_data, y_train, y_test,
            train_indices, test_indices)
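
# Note: _generate_sample_indices / _generate_unsampled_indices reproduce the
# bootstrap draw scikit-learn made when growing a tree, keyed by the tree's
# random_state. Passing that seed as oob_seed therefore recovers the exact
# in-bag / out-of-bag split for that tree. Illustrative sketch (assumes a
# fitted RandomForestRegressor `rf` and a BlockPartitionedData `bd`):
#
#   tree = rf.estimators_[0]
#   splits = _blocked_train_test_split(bd, y, oob_seed=tree.random_state)
#   train_bd, test_bd, y_train, y_test, train_idx, test_idx = splits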