mdlp.py
'''
# Discretization MDLP
Python implementation of Fayyad and Irani's MDLP criterion discretization algorithm

**Reference:**
Fayyad, Usama M., and Keki B. Irani. "Multi-interval discretization of continuous-valued attributes for classification learning." IJCAI (1993).
'''
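
# For orientation, a summary of the acceptance rule from the paper above, which
# MDLPC_criterion below implements: a candidate cut point T on feature A over a
# sample S of N examples is accepted iff
#
#     Gain(A, T; S) > log2(N - 1) / N + Delta(A, T; S) / N
#
# where
#
#     Delta(A, T; S) = log2(3^k - 2) - [k * Ent(S) - k1 * Ent(S1) - k2 * Ent(S2)]
#
# and k, k1, k2 count the classes present in S and in the two sub-partitions
# S1, S2 induced by T, with Ent the class entropy. (Note that the code below uses
# log2(3^k) rather than log2(3^k - 2) for the first term of Delta.)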
__author__ = 'Victor Ruiz, vmr11@pitt.edu'

import numbers
from math import log

import numpy as np
import pandas as pd

from imodels.util.metrics import entropy, cut_point_information_gain

class MDLPDiscretizer(object):
    def __init__(self, dataset, class_label, out_path_data=None, out_path_bins=None, features=None):
        '''
        initializes discretizer object:
            saves raw copy of data and creates self._data with only features to discretize and class
            computes initial entropy (before any splitting)
            self._features = features to be discretized
            self._classes = unique classes in raw_data
            self._class_name = label of class in pandas dataframe
            self._data = partition of data with only features of interest and class
            self._cuts = dictionary with cut points for each feature
        Params
        ------
        dataset
            pandas dataframe with data to discretize
        class_label
            name of the column containing the class in the input dataframe
        features
            if not None, the specific features that the user wants to discretize
        '''
        if not isinstance(dataset, pd.DataFrame):  # class needs a pandas dataframe
            raise AttributeError('input dataset should be a pandas data frame')

        self._data_raw = dataset  # copy of original input data
        self._class_name = class_label
        self._classes = self._data_raw[self._class_name].drop_duplicates()  # unique classes

        # if user specifies which attributes to discretize
        if features:
            self._features = [f for f in features if f in self._data_raw.columns]  # keep only features in dataframe
            missing = set(features) - set(self._features)  # specified columns not in dataframe
            if missing:
                print('WARNING: user-specified features %s not in input dataframe' % str(missing))
        else:  # then we need to recognize which features are numeric
            numeric_cols = self._data_raw.select_dtypes(include=[np.number]).columns
            self._features = [f for f in numeric_cols if f != class_label]
        # other features that won't be discretized
        self._ignored_features = set(self._data_raw.columns) - set(self._features)

        # create copy of data only including features to discretize and class
        self._data = self._data_raw.loc[:, self._features + [class_label]]
        self._data = self._data.infer_objects()
        # pre-compute all boundary points in dataset
        self._boundaries = self._compute_boundary_points_all_features()
        # initialize feature bins with empty arrays
        self._cuts = {f: [] for f in self._features}
        # get cuts for all features
        self._all_features_accepted_cutpoints()
        # discretize self._data
        self._apply_cutpoints(out_data_path=out_path_data, out_bins_path=out_path_bins)

    def MDLPC_criterion(self, data, feature, cut_point):
        '''
        Determines whether a partition is accepted according to the MDLPC criterion
        :param data: data partition (pandas dataframe) in the interval of interest
        :param feature: feature of interest
        :param cut_point: proposed cut point
        :return: True/False, whether to accept the partition
        '''
        # get dataframe only with desired attribute and class columns, and split by cut_point
        data_partition = data.copy(deep=True)
        data_left = data_partition[data_partition[feature] <= cut_point]
        data_right = data_partition[data_partition[feature] > cut_point]

        # compute information gain obtained when splitting data at cut_point
        cut_point_gain = cut_point_information_gain(dataset=data_partition, cut_point=cut_point,
                                                    feature_label=feature, class_label=self._class_name)
        # compute delta term in MDLPC criterion
        N = len(data_partition)  # number of examples in current partition
        partition_entropy = entropy(data_partition[self._class_name])
        k = len(data_partition[self._class_name].unique())
        k_left = len(data_left[self._class_name].unique())
        k_right = len(data_right[self._class_name].unique())
        entropy_left = entropy(data_left[self._class_name])  # entropy of each sub-partition
        entropy_right = entropy(data_right[self._class_name])
        delta = log(3 ** k, 2) - (k * partition_entropy) + (k_left * entropy_left) + (k_right * entropy_right)

        # to split or not to split
        gain_threshold = (log(N - 1, 2) + delta) / N
        return cut_point_gain > gain_threshold

    def _feature_boundary_points(self, data, feature):
        '''
        Given an attribute, find all potential cut_points (boundary points)
        :param data: data partition (pandas dataframe) with the rows of interest
        :param feature: feature of interest
        :return: set of potential cut_points
        '''
        # get dataframe with only rows of interest, and feature and class columns
        data_partition = data.copy(deep=True)
        data_partition.sort_values(feature, ascending=True, inplace=True)

        boundary_points = []

        # add temporary columns: each '_offset' column is the original column shifted down by one row
        data_partition['class_offset'] = data_partition[self._class_name].shift(1)
        data_partition['feature_offset'] = data_partition[feature].shift(1)
        data_partition['feature_change'] = (data_partition[feature] != data_partition['feature_offset'])
        data_partition['mid_points'] = data_partition.loc[:, [feature, 'feature_offset']].mean(axis=1)

        potential_cuts = data_partition[data_partition['feature_change']].index[1:]
        sorted_index = data_partition.index.tolist()

        for row in potential_cuts:
            old_value = data_partition.loc[sorted_index[sorted_index.index(row) - 1]][feature]
            new_value = data_partition.loc[row][feature]
            old_classes = data_partition[data_partition[feature] == old_value][self._class_name].unique()
            new_classes = data_partition[data_partition[feature] == new_value][self._class_name].unique()
            # a midpoint is a boundary point only if the adjacent feature values span more than one class
            if len(set.union(set(old_classes), set(new_classes))) > 1:
                boundary_points += [data_partition.loc[row]['mid_points']]
        return set(boundary_points)

    def _compute_boundary_points_all_features(self):
        '''
        Computes all possible boundary points for each attribute in self._features (features to discretize)
        :return: dictionary mapping each feature to its set of boundary points
        '''
        boundaries = {}
        for attr in self._features:
            data_partition = self._data.loc[:, [attr, self._class_name]]
            boundaries[attr] = self._feature_boundary_points(data=data_partition, feature=attr)
        return boundaries

    def _boundaries_in_partition(self, data, feature):
        '''
        From the collection of all cut points for all features, find cut points that fall within a feature-partition's
        attribute-values' range
        :param data: data partition (pandas dataframe)
        :param feature: attribute of interest
        :return: points within feature's range
        '''
        range_min, range_max = (data[feature].min(), data[feature].max())
        return {x for x in self._boundaries[feature] if range_min < x < range_max}

    def _best_cut_point(self, data, feature):
        '''
        Selects the best cut point for a feature in a data partition based on information gain
        :param data: data partition (pandas dataframe)
        :param feature: target attribute
        :return: value of cut point with highest information gain (if many, picks first). None if no candidates
        '''
        candidates = self._boundaries_in_partition(data=data, feature=feature)
        if not candidates:
            return None
        gains = [(cut, cut_point_information_gain(dataset=data, cut_point=cut, feature_label=feature,
                                                  class_label=self._class_name)) for cut in candidates]
        gains = sorted(gains, key=lambda x: x[1], reverse=True)
        return gains[0][0]  # cut point with the highest gain

    def _single_feature_accepted_cutpoints(self, feature, partition_index=pd.DataFrame().index):
        '''
        Computes the cuts for binning a feature according to the MDLP criterion
        :param feature: attribute of interest
        :param partition_index: index of examples in data partition for which cuts are required
        :return: list of cuts for binning feature in partition covered by partition_index
        '''
        if partition_index.size == 0:
            partition_index = self._data.index  # if not specified, the full sample is considered for partitioning

        data_partition = self._data.loc[partition_index, [feature, self._class_name]]

        # exclude missing data
        if data_partition[feature].isnull().values.any():
            data_partition = data_partition[~data_partition[feature].isnull()]

        # stop if constant or null feature values
        if len(data_partition[feature].unique()) < 2:
            return

        # determine whether to cut and where
        cut_candidate = self._best_cut_point(data=data_partition, feature=feature)
        if cut_candidate is None:
            return
        decision = self.MDLPC_criterion(data=data_partition, feature=feature, cut_point=cut_candidate)
        if not decision:
            return  # if the partition wasn't accepted, there's nothing else to do

        # now we have two new partitions that need to be examined
        left_partition = data_partition[data_partition[feature] <= cut_candidate]
        right_partition = data_partition[data_partition[feature] > cut_candidate]
        if left_partition.empty or right_partition.empty:
            return  # extreme point selected, don't partition

        self._cuts[feature] += [cut_candidate]  # accept partition
        self._single_feature_accepted_cutpoints(feature=feature, partition_index=left_partition.index)
        self._single_feature_accepted_cutpoints(feature=feature, partition_index=right_partition.index)
        # keep cutpoints in ascending order
        self._cuts[feature] = sorted(self._cuts[feature])
        return

    def _all_features_accepted_cutpoints(self):
        '''
        Computes cut points for all numeric features (the ones in self._features)
        '''
        for attr in self._features:
            self._single_feature_accepted_cutpoints(feature=attr)
        return

    def _apply_cutpoints(self, out_data_path=None, out_bins_path=None):
        '''
        Discretizes data by applying bins according to self._cuts. Optionally saves the discretized data and a
        description of the bins
        :param out_data_path: path to save discretized data (no file is written if None)
        :param out_bins_path: path to save bins description (no file is written if None)
        '''
        bin_label_collection = {}
        for attr in self._features:
            if len(self._cuts[attr]) == 0:
                self._data[attr] = 'All'
                bin_label_collection[attr] = ['All']
            else:
                cuts = [-np.inf] + self._cuts[attr] + [np.inf]
                start_bin_indices = range(0, len(cuts) - 1)
                bin_labels = ['%s_to_%s' % (str(cuts[i]), str(cuts[i + 1])) for i in start_bin_indices]
                bin_label_collection[attr] = bin_labels
                self._data[attr] = pd.cut(x=self._data[attr].values, bins=cuts, right=False, labels=bin_labels,
                                          precision=6, include_lowest=True)

        # reconstitute full data, now discretized
        if self._ignored_features:
            to_return = pd.concat([self._data, self._data_raw[list(self._ignored_features)]], axis=1)
            to_return = to_return[self._data_raw.columns]  # sort columns so they have the original order
        else:
            to_return = self._data

        # save data as csv
        if out_data_path:
            to_return.to_csv(out_data_path)
        # save bins description
        if out_bins_path:
            with open(out_bins_path, 'w') as bins_file:
                print('Description of bins in file: %s' % out_data_path, file=bins_file)
                for attr in self._features:
                    print('attr: %s\n\t%s' % (attr, ', '.join(bin_label_collection[attr])), file=bins_file)
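
# A minimal usage sketch for MDLPDiscretizer (illustrative only: the dataframe and
# column names below are hypothetical, not taken from this file):
#
#     df = pd.DataFrame({'age': [22, 25, 47, 52, 46, 56], 'label': [0, 0, 1, 1, 1, 1]})
#     disc = MDLPDiscretizer(dataset=df, class_label='label')
#     disc._cuts   # accepted cut points per feature, e.g. {'age': [...]}
#     disc._data   # discretized copy of the selected columns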


class BRLDiscretizer:
    def __init__(self, feature_labels, verbose=False):
        self.feature_labels_original = feature_labels
        self.verbose = verbose

    def fit(self, X, y, undiscretized_features=[]):
        # check which features are numeric (to be discretized)
        self.discretized_features = []
        X_str_disc = self._encode_strings(X)
        for fi in range(X_str_disc.shape[1]):
            # if not a string, has values other than 0 and 1, and not specified as undiscretized
            if (
                    isinstance(X_str_disc[0][fi], numbers.Number)
                    and (not set(np.unique(X_str_disc[:, fi])).issubset({0, 1}))
                    and (len(self.feature_labels) == 0 or
                         len(undiscretized_features) == 0 or
                         self.feature_labels[fi] not in undiscretized_features)
            ):
                self.discretized_features.append(self.feature_labels[fi])
        if len(self.discretized_features) > 0:
            if self.verbose:
                print(
                    "Warning: non-categorical data found. Trying to discretize. (Please convert categorical values to "
                    "strings, and/or specify the argument 'undiscretized_features', to avoid this.)")
            self.discretized_X = self.discretize(X_str_disc, y)
        else:
            self.discretizer = None
        return

    def discretize(self, X, y):
        '''Discretize the features specified in self.discretized_features
        '''
        if self.verbose:
            print("Discretizing ", self.discretized_features, "...")
        D = pd.DataFrame(np.hstack((X, np.expand_dims(y, axis=1))), columns=list(self.feature_labels) + ["y"])
        self.discretizer = MDLPDiscretizer(dataset=D, class_label="y", features=self.discretized_features)

        cat_data = pd.DataFrame(np.zeros_like(X))
        for i in range(len(self.feature_labels)):
            label = self.feature_labels[i]
            if label in self.discretized_features:
                new_column = label + " : " + self.discretizer._data[label].astype(str)
                cat_data.iloc[:, i] = new_column
            else:
                cat_data.iloc[:, i] = D[label]
        return np.array(cat_data).tolist()

    def _encode_strings(self, X):
        # one-hot encode string columns, pass other columns through unchanged
        X_str_disc = pd.DataFrame([])
        for fi in range(X.shape[1]):
            if issubclass(type(X[0][fi]), str):
                new_columns = pd.get_dummies(X[:, fi])
                new_columns.columns = [self.feature_labels_original[fi] + '_' + value for value in new_columns.columns]
                new_columns_colon_format = new_columns.apply(lambda s: s.name + ' : ' + s.astype(str))
                X_str_disc = pd.concat([X_str_disc, new_columns_colon_format], axis=1)
            else:
                X_str_disc = pd.concat([X_str_disc, pd.Series(X[:, fi], name=self.feature_labels_original[fi])], axis=1)
        self.feature_labels = list(X_str_disc.columns)
        return X_str_disc.values

    def transform(self, X, return_onehot=True):
        if isinstance(X, (pd.DataFrame, pd.Series)):
            X = X.values
        if self.discretizer is None:
            return pd.DataFrame(X, columns=self.feature_labels_original)
        self.data = pd.DataFrame(self._encode_strings(X), columns=self.feature_labels)
        self._apply_cutpoints()
        D = np.array(self.data)

        # prepend feature labels
        Dl = np.copy(D).astype(str).tolist()
        for i in range(len(Dl)):
            for j in range(len(Dl[0])):
                Dl[i][j] = self.feature_labels[j] + " : " + Dl[i][j]
        if not return_onehot:
            return Dl
        return self.get_onehot_df(Dl)

    @property
    def onehot_df(self):
        return self.get_onehot_df(self.discretized_X)

    def get_onehot_df(self, discretized_X):
        '''Create a readable one-hot encoded DataFrame from discretized features
        '''
        data = list(discretized_X[:])
        X_colname_removed = data.copy()
        replace_str_entries_func = lambda s: s.split(' : ')[1] if type(s) is str else s
        for i in range(len(data)):
            X_colname_removed[i] = list(map(replace_str_entries_func, X_colname_removed[i]))
        X_df_categorical = pd.DataFrame(X_colname_removed, columns=self.feature_labels)
        X_df_onehot = pd.get_dummies(X_df_categorical)
        return X_df_onehot

    @property
    def data(self):
        return self.discretizer._data

    @data.setter
    def data(self, value):
        self.discretizer._data = value

    def _apply_cutpoints(self):
        return self.discretizer._apply_cutpoints()
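

# Quick smoke test of BRLDiscretizer (an illustrative sketch: the toy data and
# feature names here are hypothetical, not part of the module's API).
if __name__ == '__main__':
    X = np.array([[0.1, 'a'],
                  [0.4, 'a'],
                  [0.35, 'b'],
                  [0.8, 'b']], dtype=object)
    y = np.array([0, 0, 1, 1])

    # fit decides which columns are numeric non-binary (and so get discretized), then bins them
    brl_disc = BRLDiscretizer(feature_labels=['num_feat', 'cat_feat'], verbose=True)
    brl_disc.fit(X, y)
    print(brl_disc.transform(X, return_onehot=True))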