brs.py
'''Original implementation at https://github.com/wangtongada/BOA
'''

import itertools
import operator
import os
import warnings
from os.path import join as oj
from bisect import bisect_left
from collections import defaultdict
from copy import deepcopy
from itertools import combinations
from random import sample

import numpy as np
import pandas as pd
from mlxtend.frequent_patterns import fpgrowth
from numpy.random import random
from pandas import read_csv
from scipy.sparse import csc_matrix
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_X_y, check_is_fitted

from imodels.rule_set.rule_set import RuleSet
from imodels.util.arguments import check_fit_arguments

class BayesianRuleSetClassifier(RuleSet, BaseEstimator, ClassifierMixin):
    '''Bayesian or-of-and algorithm.
    Generates patterns that satisfy the minimum support and maximum length,
    then keeps the n_rules rules with the highest information gain.
    During the simulated-annealing search in fit, each local maximum is stored in maps and the best rule set found is kept.
    Note that a rule set is represented as a list of indices into self.rules_.
    '''
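
    # Minimal usage sketch (hypothetical variable names; assumes a binarized 0/1
    # feature matrix X and binary labels y, as in the demo at the bottom of this file):
    #   clf = BayesianRuleSetClassifier(n_rules=100, maxlen=3)
    #   clf.fit(X, y)
    #   y_pred = clf.predict(X)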
    def __init__(self, n_rules: int = 2000,
                 supp=5, maxlen: int = 10,
                 num_iterations=5000, num_chains=3, q=0.1,
                 alpha_pos=100, beta_pos=1,
                 alpha_neg=100, beta_neg=1,
                 alpha_l=None, beta_l=None,
                 discretization_method='randomforest', random_state=0):
        '''
        Params
        ------
        n_rules
            number of candidate rules kept after screening; the simulated-annealing
            search selects its rule sets from these
        supp
            minimum support of a pattern, as a percentage of the positive examples;
            the higher supp is, the 'larger' a pattern must be. 5 is generally a good value
        maxlen
            maximum length of a pattern
        num_iterations
            number of iterations in each chain
        num_chains
            number of chains in the simulated annealing search algorithm
        q
            probability of proposing a uniformly random (rather than greedy) rule
            change in each cut/add move
        alpha_pos, beta_pos
            beta prior on the accuracy of the predicted positives;
            $\rho = alpha / (alpha + beta)$, so make sure $\rho$ is close to one
            when choosing alpha and beta
        alpha_neg, beta_neg
            beta prior on the accuracy of the predicted negatives
        alpha_l, beta_l
            per-length priors on how many rules of each length are selected;
            if None, defaults are derived from the pattern space in fit
        discretization_method
            how candidate rules are generated: 'fpgrowth' or 'randomforest'
        random_state
            seed used by fit for numpy's random number generator
        '''
        self.n_rules = n_rules
        self.supp = supp
        self.maxlen = maxlen
        self.num_iterations = num_iterations
        self.num_chains = num_chains
        self.q = q
        self.alpha_pos = alpha_pos
        self.beta_pos = beta_pos
        self.alpha_neg = alpha_neg
        self.beta_neg = beta_neg
        self.discretization_method = discretization_method
        self.alpha_l = alpha_l
        self.beta_l = beta_l
        self.random_state = random_state
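
    # fit proceeds in three stages: (1) mine candidate rules with fpgrowth or a random
    # forest, (2) screen them down to n_rules by information gain (_screen_rules), and
    # (3) run num_chains simulated-annealing chains over subsets of those rules.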
    def fit(self, X, y, feature_names: list = None, init=[], verbose=False):
        '''
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples]
            Labels
        feature_names : array-like, shape = [n_features], optional (default: None)
            String labels for each feature.
            If None and X is a DataFrame, column labels are used.
            If None and X is not a DataFrame, features are simply enumerated.
        '''
        # check inputs
        self.attr_level_num = defaultdict(int)  # any missing value defaults to 0
        self.attr_names = []
        X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
        np.random.seed(self.random_state)

        # convert to pandas DataFrame
        X = pd.DataFrame(X, columns=feature_names)
        for i, name in enumerate(X.columns):
            self.attr_level_num[name] += 1
            self.attr_names.append(name)
        self.attr_names_orig = deepcopy(self.attr_names)
        self.attr_names = list(set(self.attr_names))

        # set up patterns
        self._set_pattern_space()

        # parameter checking
        if self.alpha_l is None or self.beta_l is None or len(self.alpha_l) != self.maxlen or len(
                self.beta_l) != self.maxlen:
            if verbose:
                print('No or wrong input for alpha_l and beta_l - the model will use default parameters.')
            self.C = [1.0 / self.maxlen] * self.maxlen
            self.C.insert(0, -1)
            self.alpha_l = [10] * (self.maxlen + 1)
            self.beta_l = [10 * self.pattern_space[i] / self.C[i] for i in range(self.maxlen + 1)]
        else:
            self.alpha_l = [1] + list(self.alpha_l)
            self.beta_l = [1] + list(self.beta_l)

        # setup
        self._generate_rules(X, y, verbose)
        n_rules_current = len(self.rules_)
        self.rules_len_list = [len(rule) for rule in self.rules_]
        maps = defaultdict(list)
        T0 = 1000  # initial temperature for simulated annealing
        split = 0.7 * self.num_iterations

        # run simulated annealing
        for chain in range(self.num_chains):
            # initialize with a random pattern set
            if init != []:
                rules_curr = init.copy()
            else:
                assert n_rules_current > 1, f'Only {n_rules_current} potential rules found, change hyperparameters to allow for more'
                N = sample(range(1, min(8, n_rules_current), 1), 1)[0]
                rules_curr = sample(range(n_rules_current), N)
            rules_curr_norm = self._normalize(rules_curr)
            pt_curr = -100000000000
            maps[chain].append(
                [-1, [pt_curr / 3, pt_curr / 3, pt_curr / 3], rules_curr, [self.rules_[i] for i in rules_curr]])
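
            # Each iteration proposes a modified rule set, scores it under the Bayesian
            # posterior (_compute_prob), and accepts it with the Metropolis-style
            # probability exp((pt_new - pt_curr) / T), where the temperature T decays
            # from T0 toward 1. After 70% of the iterations (split), the chain restarts
            # from previously recorded local maxima.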
            for iter in range(self.num_iterations):
                if iter >= split:
                    p = np.array(range(1 + len(maps[chain])))
                    p = np.array(list(_accumulate(p)))
                    p = p / p[-1]
                    index = _find_lt(p, random())
                    rules_curr = maps[chain][index][2].copy()
                    rules_curr_norm = maps[chain][index][2].copy()

                # propose new rules
                rules_new, rules_norm = self._propose(rules_curr.copy(), rules_curr_norm.copy(), self.q, y)

                # compute probability of new rules
                cfmatrix, prob = self._compute_prob(rules_new, y)
                T = T0 ** (1 - iter / self.num_iterations)  # temperature for simulated annealing
                pt_new = sum(prob)
                with warnings.catch_warnings():
                    if not verbose:
                        warnings.simplefilter("ignore")
                    alpha = np.exp(float(pt_new - pt_curr) / T)

                if pt_new > sum(maps[chain][-1][1]):
                    maps[chain].append([iter, prob, rules_new, [self.rules_[i] for i in rules_new]])
                    if verbose:
                        print((
                            '\n** chain = {}, max at iter = {} ** \n accuracy = {}, TP = {}, FP = {}, TN = {}, FN = {}'
                            '\n pt_new is {}, prior_ChsRules={}, likelihood_1 = {}, likelihood_2 = {}\n').format(
                            chain, iter, (cfmatrix[0] + cfmatrix[2] + 0.0) / len(y), cfmatrix[0], cfmatrix[1],
                            cfmatrix[2], cfmatrix[3], sum(prob), prob[0], prob[1], prob[2])
                        )
                        self._print_rules(rules_new)
                        print(rules_new)
                if random() <= alpha:
                    rules_curr_norm, rules_curr, pt_curr = rules_norm.copy(), rules_new.copy(), pt_new

        pt_max = [sum(maps[chain][-1][1]) for chain in range(self.num_chains)]
        index = pt_max.index(max(pt_max))
        self.rules_ = maps[index][-1][3]
        return self
    def __str__(self):
        return ' '.join(str(r) for r in self.rules_)
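
    # A sample is predicted positive if it satisfies at least one selected rule
    # (an OR of ANDs); a rule fires only when all of its literals equal 1.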
    def predict(self, X):
        check_is_fitted(self)
        if isinstance(X, np.ndarray):
            df = pd.DataFrame(X, columns=self.attr_names_orig)
        else:
            df = X
        Z = [[]] * len(self.rules_)
        dfn = 1 - df  # df has negative associations
        dfn.columns = [name.strip() + '_neg' for name in df.columns]
        df = pd.concat([df, dfn], axis=1)
        for i, rule in enumerate(self.rules_):
            Z[i] = (np.sum(df[list(rule)], axis=1) == len(rule)).astype(int)
        Yhat = (np.sum(Z, axis=0) > 0).astype(int)
        return Yhat
    def predict_proba(self, X):
        raise Exception('BOA does not support predicted probabilities.')
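
    # pattern_space[k] counts how many distinct patterns of length k exist: the sum,
    # over all k-subsets of attributes (including the added *_neg attributes), of the
    # product of their level counts. It serves as the 'n' of the beta-binomial prior
    # over how many rules of each length a rule set may contain (see _compute_prob).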
    def _set_pattern_space(self):
        """Compute the rule space from the levels in each attribute
        """
        # add feat_neg to each existing feature feat
        for item in self.attr_names:
            self.attr_level_num[item + '_neg'] = self.attr_level_num[item]
        tmp = [item + '_neg' for item in self.attr_names]
        self.attr_names.extend(tmp)

        # set up pattern_space
        self.pattern_space = np.zeros(self.maxlen + 1)
        for k in range(1, self.maxlen + 1, 1):
            for subset in combinations(self.attr_names, k):
                tmp = 1
                for i in subset:
                    tmp = tmp * self.attr_level_num[i]
                # print('subset', subset, 'tmp', tmp, 'k', k)
                self.pattern_space[k] = self.pattern_space[k] + tmp
    def _generate_rules(self, X, y, verbose):
        '''Generate rules that satisfy supp and maxlen using fpgrowth, then select the top n_rules rules
        that yield the largest decrease in entropy.

        There are two ways to generate rules. fpgrowth can handle cases where maxlen is small: if maxlen <= 3,
        fpgrowth generates rules much faster than a random forest. If maxlen is large, fpgrowth tends to
        generate too many rules and can run out of memory.
        '''
        df = 1 - X  # df has negative associations
        df.columns = [name.strip() + '_neg' for name in X.columns]
        df = pd.concat([X, df], axis=1)
        if self.discretization_method == 'fpgrowth' and self.maxlen <= 3:
            itemMatrix = [[item for item in df.columns if row[item] == 1] for i, row in df.iterrows()]
            pindex = np.where(y == 1)[0]
            rules = fpgrowth([itemMatrix[i] for i in pindex], supp=self.supp, zmin=1, zmax=self.maxlen)
            rules = [tuple(np.sort(rule[0])) for rule in rules]
            rules = list(set(rules))
        else:
            # todo: replace this with imodels.RFDiscretizer
            rules = []
            for length in range(1, self.maxlen + 1, 1):
                n_estimators = min(pow(df.shape[1], length), 4000)
                clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=length)
                clf.fit(X, y)
                for n in range(n_estimators):
                    rules.extend(_extract_rules(clf.estimators_[n], df.columns))
            rules = [list(x) for x in set(tuple(x) for x in rules)]
        self.rules_ = rules

        # select the top n_rules rules using secondary criteria, information gain
        self._screen_rules(df, y, verbose)  # updates self.rules_
        self._set_pattern_space()
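
    # Screening keeps only rules whose support on the positive class is at least supp%,
    # then ranks them by the conditional entropy of y given whether the rule fires and
    # keeps the n_rules lowest-entropy (highest information gain) candidates.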
    def _screen_rules(self, df, y, verbose):
        '''Screen rules using information gain
        '''
        item_ind_dict = {}
        for i, name in enumerate(df.columns):
            item_ind_dict[name] = i
        indices = np.array(
            list(itertools.chain.from_iterable([[
                item_ind_dict[x] for x in rule]
                for rule in self.rules_])))
        len_rules = [len(rule) for rule in self.rules_]
        indptr = list(_accumulate(len_rules))
        indptr.insert(0, 0)
        indptr = np.array(indptr)
        data = np.ones(len(indices))
        rule_matrix = csc_matrix((data, indices, indptr),
                                 shape=(len(df.columns),
                                        len(self.rules_)))
        mat = df.values @ rule_matrix
        if verbose:
            print('mat.shape', mat.shape)
        len_matrix = np.array([len_rules] * df.shape[0])
        Z = (mat == len_matrix).astype(int)

        Zpos = [Z[i] for i in np.where(y > 0)][0]
        TP = np.sum(Zpos, axis=0)
        supp_select = np.where(TP >= self.supp * sum(y) / 100)[0]
        FP = np.sum(Z, axis=0) - TP
        TN = len(y) - np.sum(y) - FP
        FN = np.sum(y) - TP
        p1 = TP.astype(float) / (TP + FP)
        p2 = FN.astype(float) / (FN + TN)
        pp = (TP + FP).astype(float) / (TP + FP + TN + FN)
        # p1 = np.clip(p1, a_min=1e-10, a_max=1-1e-10)

        with warnings.catch_warnings():
            if not verbose:
                warnings.simplefilter("ignore")  # ignore warnings about invalid values (e.g. log(0))
            cond_entropy = -pp * (p1 * np.log(p1) + (1 - p1) * np.log(1 - p1)) - (1 - pp) * (
                p2 * np.log(p2) + (1 - p2) * np.log(1 - p2))
            cond_entropy[p1 * (1 - p1) == 0] = -((1 - pp) * (p2 * np.log(p2) + (1 - p2) * np.log(1 - p2)))[
                p1 * (1 - p1) == 0]
            cond_entropy[p2 * (1 - p2) == 0] = -(pp * (p1 * np.log(p1) + (1 - p1) * np.log(1 - p1)))[p2 * (1 - p2) == 0]
            cond_entropy[p1 * (1 - p1) * p2 * (1 - p2) == 0] = 0
        select = np.argsort(cond_entropy[supp_select])[::-1][-self.n_rules:]
        self.rules_ = [self.rules_[i] for i in supp_select[select]]
        self.RMatrix = np.array(Z[:, supp_select[select]])
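
    # One proposal step of the search: pick a misclassified example and either ADD a
    # rule (false negative), CUT a rule (false positive), or replace one (cut then add).
    # If every example is already classified correctly, a CLEAN move tries to prune
    # redundant rules instead. With probability q the changed rule is chosen uniformly
    # at random; otherwise it is chosen greedily from confusion-matrix statistics.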
    def _propose(self, rules_curr, rules_norm, q, y):
        nRules = len(self.rules_)
        yhat = (np.sum(self.RMatrix[:, rules_curr], axis=1) > 0).astype(int)
        incorr = np.where(y != yhat)[0]
        N = len(rules_curr)
        if len(incorr) == 0:
            # BOA correctly classified all points but there could be redundant patterns, so cleaning is needed
            move = ['clean']
        else:
            ex = sample(incorr.tolist(), 1)[0]
            t = random()
            if y[ex] == 1 or N == 1:
                if t < 1.0 / 2 or N == 1:
                    move = ['add']  # action: add
                else:
                    move = ['cut', 'add']  # action: replace
            else:
                if t < 1.0 / 2:
                    move = ['cut']  # action: cut
                else:
                    move = ['cut', 'add']  # action: replace

        if move[0] == 'cut':
            # cut
            if random() < q:
                candidate = list(set(np.where(self.RMatrix[ex, :] == 1)[0]).intersection(rules_curr))
                if len(candidate) == 0:
                    candidate = rules_curr
                cut_rule = sample(candidate, 1)[0]
            else:
                p = []
                all_sum = np.sum(self.RMatrix[:, rules_curr], axis=1)
                for index, rule in enumerate(rules_curr):
                    yhat = ((all_sum - np.array(self.RMatrix[:, rule])) > 0).astype(int)
                    TP, FP, TN, FN = _get_confusion_matrix(yhat, y)
                    p.append(TP.astype(float) / (TP + FP + 1))
                p = [x - min(p) for x in p]
                p = np.exp(p)
                p = np.insert(p, 0, 0)
                p = np.array(list(_accumulate(p)))
                if p[-1] == 0:
                    index = sample(range(len(rules_curr)), 1)[0]
                else:
                    p = p / p[-1]
                    index = _find_lt(p, random())
                cut_rule = rules_curr[index]
            rules_curr.remove(cut_rule)
            rules_norm = self._normalize(rules_curr)
            move.remove('cut')

        if len(move) > 0 and move[0] == 'add':
            # add
            if random() < q:
                add_rule = sample(range(nRules), 1)[0]
            else:
                Yhat_neg_index = list(np.where(np.sum(self.RMatrix[:, rules_curr], axis=1) < 1)[0])
                mat = np.multiply(self.RMatrix[Yhat_neg_index, :].transpose(), y[Yhat_neg_index])
                TP = np.sum(mat, axis=1)
                FP = np.array((np.sum(self.RMatrix[Yhat_neg_index, :], axis=0) - TP))
                p = (TP.astype(float) / (TP + FP + 1))
                p[rules_curr] = 0
                add_rule = sample(np.where(p == max(p))[0].tolist(), 1)[0]
            if add_rule not in rules_curr:
                rules_curr.append(add_rule)
                rules_norm = self._normalize(rules_curr)

        if len(move) > 0 and move[0] == 'clean':
            remove = []
            for i, rule in enumerate(rules_norm):
                yhat = (np.sum(
                    self.RMatrix[:, [rule for j, rule in enumerate(rules_norm) if (j != i and j not in remove)]],
                    axis=1) > 0).astype(int)
                TP, FP, TN, FN = _get_confusion_matrix(yhat, y)
                if TP + FP == 0:
                    remove.append(i)
            for x in remove:
                rules_norm.remove(x)
            return rules_curr, rules_norm

        return rules_curr, rules_norm
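
    # The (unnormalized log) posterior of a rule set decomposes into three terms: a
    # beta-binomial prior on how many rules of each length are chosen (relative to the
    # pattern space), plus beta-binomial likelihoods for the fraction of predicted
    # positives that are true positives (TP of TP + FP) and the fraction of predicted
    # negatives that are true negatives (TN of FN + TN).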
    def _compute_prob(self, rules, y):
        Yhat = (np.sum(self.RMatrix[:, rules], axis=1) > 0).astype(int)
        TP, FP, TN, FN = _get_confusion_matrix(Yhat, y)
        Kn_count = list(np.bincount([self.rules_len_list[x] for x in rules], minlength=self.maxlen + 1))
        prior_ChsRules = sum([_log_betabin(Kn_count[i], self.pattern_space[i], self.alpha_l[i], self.beta_l[i]) for i in
                              range(1, len(Kn_count), 1)])
        likelihood_1 = _log_betabin(TP, TP + FP, self.alpha_pos, self.beta_pos)
        likelihood_2 = _log_betabin(TN, FN + TN, self.alpha_neg, self.beta_neg)
        return [TP, FP, TN, FN], [prior_ChsRules, likelihood_1, likelihood_2]
    def _normalize_add(self, rules_new, rule_index):
        rules = rules_new.copy()
        for rule in rules_new:
            if set(self.rules_[rule]).issubset(self.rules_[rule_index]):
                return rules_new.copy()
            if set(self.rules_[rule_index]).issubset(self.rules_[rule]):
                rules.remove(rule)
        rules.append(rule_index)
        return rules

    def _normalize(self, rules_new):
        try:
            rules_len = [len(self.rules_[index]) for index in rules_new]
            rules = [rules_new[i] for i in np.argsort(rules_len)[::-1][:len(rules_len)]]
            p1 = 0
            while p1 < len(rules):
                for p2 in range(p1 + 1, len(rules), 1):
                    if set(self.rules_[rules[p2]]).issubset(set(self.rules_[rules[p1]])):
                        rules.remove(rules[p1])
                        p1 -= 1
                        break
                p1 += 1
            return rules
        except:
            return rules_new.copy()

    def _print_rules(self, rules_max):
        for rule_index in rules_max:
            print(self.rules_[rule_index])

def _accumulate(iterable, func=operator.add):
    '''Return running totals
    Ex. _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
    Ex. _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
    '''
    it = iter(iterable)
    total = next(it)
    yield total
    for element in it:
        total = func(total, element)
        yield total


def _find_lt(a, x):
    """Find rightmost value less than x"""
    i = bisect_left(a, x)
    if i:
        return int(i - 1)
    print('in _find_lt,{}'.format(a))
    raise ValueError


def _log_gampoiss(k, alpha, beta):
    import math
    k = int(k)
    return math.lgamma(k + alpha) + alpha * np.log(beta) - math.lgamma(alpha) - math.lgamma(k + 1) - (
        alpha + k) * np.log(1 + beta)
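
# _log_betabin returns log( B(k + alpha, n - k + beta) / B(alpha, beta) ), i.e. the log
# beta-binomial mass without the binomial coefficient C(n, k):
#   lgamma(k + alpha) + lgamma(n - k + beta) - lgamma(n + alpha + beta)
#   + lgamma(alpha + beta) - lgamma(alpha) - lgamma(beta)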
def _log_betabin(k, n, alpha, beta):
    import math
    try:
        const = math.lgamma(alpha + beta) - math.lgamma(alpha) - math.lgamma(beta)
    except:
        print('alpha = {}, beta = {}'.format(alpha, beta))
    if isinstance(k, list) or isinstance(k, np.ndarray):
        if len(k) != len(n):
            print('length of k is %d and length of n is %d' % (len(k), len(n)))
            raise ValueError
        lbeta = []
        for ki, ni in zip(k, n):
            lbeta.append(math.lgamma(ki + alpha) + math.lgamma(ni - ki + beta) - math.lgamma(ni + alpha + beta) + const)
        return np.array(lbeta)
    else:
        return math.lgamma(k + alpha) + math.lgamma(n - k + beta) - math.lgamma(n + alpha + beta) + const


def _get_confusion_matrix(Yhat, Y):
    if len(Yhat) != len(Y):
        raise ValueError('Yhat and Y have different lengths')
    TP = np.dot(np.array(Y), np.array(Yhat))
    FP = np.sum(Yhat) - TP
    TN = len(Y) - np.sum(Y) - FP
    FN = len(Yhat) - np.sum(Yhat) - TN
    return TP, FP, TN, FN
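
# _extract_rules walks from each leaf of a fitted decision tree back to the root,
# recording the feature tested at every split and appending '_neg' when the path goes
# through the left (feature <= threshold, i.e. 0 for binarized data) branch, so each
# leaf becomes one candidate conjunction of literals.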
def _extract_rules(tree, feature_names):
    left = tree.tree_.children_left
    right = tree.tree_.children_right
    features = [feature_names[i] for i in tree.tree_.feature]

    # get ids of leaf nodes
    idx = np.argwhere(left == -1)[:, 0]

    def _recurse(left, right, child, lineage=None):
        if lineage is None:
            lineage = []
        if child in left:
            parent = np.where(left == child)[0].item()
            suffix = '_neg'
        else:
            parent = np.where(right == child)[0].item()
            suffix = ''
        lineage.append((features[parent].strip() + suffix))
        if parent == 0:
            lineage.reverse()
            return lineage
        else:
            return _recurse(left, right, parent, lineage)

    rules = []
    for child in idx:
        rule = []
        for node in _recurse(left, right, child):
            rule.append(node)
        rules.append(rule)
    return rules
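
# Smoke test: fit on a random half of the tic-tac-toe dataset and check that held-out
# accuracy exceeds 0.8, both for DataFrame input and for the equivalent numpy arrays.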
if __name__ == '__main__':
    test_dir = os.path.dirname(os.path.abspath(__file__))
    df = read_csv(oj(test_dir, '../../tests/test_data', 'tictactoe_X.txt'), header=0, sep=" ")
    Y = np.loadtxt(open(oj(test_dir, '../../tests/test_data', 'tictactoe_Y.txt'), "rb"), delimiter=" ")
    lenY = len(Y)
    idxs_train = sample(range(lenY), int(0.50 * lenY))
    idxs_test = [i for i in range(lenY) if i not in idxs_train]
    y_test = Y[idxs_test]
    model = BayesianRuleSetClassifier(n_rules=100,
                                      supp=5,
                                      maxlen=3,
                                      num_iterations=100,
                                      num_chains=2,
                                      alpha_pos=500, beta_pos=1,
                                      alpha_neg=500, beta_neg=1,
                                      alpha_l=None, beta_l=None)

    # fit and check accuracy
    np.random.seed(13)
    # random.seed(13)
    model.fit(df.iloc[idxs_train], Y[idxs_train])
    y_pred = model.predict(df.iloc[idxs_test])
    acc1 = np.mean(y_pred == y_test)
    assert acc1 > 0.8

    # try fitting np version
    np.random.seed(13)
    # random.seed(13)
    model.fit(df.iloc[idxs_train].values, Y[idxs_train])
    y_pred = model.predict(df.iloc[idxs_test].values)
    y_test = Y[idxs_test]
    acc2 = np.mean(y_pred == y_test)
    assert acc2 > 0.8
    # assert np.abs(acc1 - acc2) < 0.05  # todo: fix seeding