analyze_helper.py

from matplotlib import pyplot as plt
import os
from os.path import join as oj
import numpy as np
import pandas as pd
import data
from sklearn.model_selection import KFold
from colorama import Fore
import pickle as pkl
import config
import viz
from config import *  # supplies path constants such as DIR_PROCESSED used below


def load_results(out_dir):
    '''Aggregate the per-model cross-validation pickles in out_dir into one
    DataFrame (one row per model; metrics averaged over folds, weighted by fold size).
    '''
    r = []
    for fname in os.listdir(out_dir):
        with open(oj(out_dir, fname), 'rb') as f:
            d = pkl.load(f)
        # keep scalar metrics only (curves cannot be averaged into a single cell)
        metrics = {k: d['cv'][k] for k in d['cv'].keys() if 'curve' not in k}
        num_pts_by_fold_cv = d['num_pts_by_fold_cv']
        out = {k: np.average(metrics[k], weights=num_pts_by_fold_cv) for k in metrics}
        out.update({k + '_std': np.std(metrics[k]) for k in metrics})
        out['model_type'] = fname.replace('.pkl', '')

        # mean / std of feature importances over folds
        imp_mat = np.array(d['imps']['imps'])
        imp_mu = imp_mat.mean(axis=0)
        imp_sd = imp_mat.std(axis=0)
        feat_names = d['feat_names_selected']
        out.update({feat_names[i] + '_f': imp_mu[i] for i in range(len(feat_names))})
        out.update({feat_names[i] + '_std_f': imp_sd[i] for i in range(len(feat_names))})
        r.append(pd.Series(out))
    r = pd.concat(r, axis=1, sort=False).T.infer_objects()
    r = r.reindex(sorted(r.columns), axis=1)  # sort the column names
    r = r.round(3)
    r = r.set_index('model_type')
    return r
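
# Usage sketch (illustrative, not from the repo): assumes out_dir holds one
# pickle per model containing the keys load_results reads ('cv',
# 'num_pts_by_fold_cv', 'imps', 'feat_names_selected'); the directory name is
# hypothetical.
#   r = load_results('models/out')
#   print(r.filter(like='_std'))  # fold-to-fold std of each metric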


def get_data_over_folds(model_names: list, out_dir: str, cell_nums: pd.Series, X, y,
                        outcome_def='y_consec_sig', dset='clath_aux+gak_a7d2'):
    '''Returns predictions/labels over folds in the dataset

    Params
    ------
    cell_nums: pd.Series
        equivalent to df.cell_num

    Returns
    -------
    d_full_cv: pd.DataFrame
        n rows, one for each data point in the training set (over all folds);
        2 columns for each model: one for predictions, one for predicted probabilities
    idxs_cv: np.array
        indexes of the corresponding validation-set locations;
        for example, df.y_thresh.iloc[idxs_cv] would yield all the labels corresponding to the cv preds
    '''
    # split testing data based on cell num
    d = {}
    cell_nums_train = config.DSETS[dset]['train']
    kf = KFold(n_splits=len(cell_nums_train))
    idxs_cv = []

    # get predictions over all folds and save into a dict
    if not isinstance(model_names, (list, np.ndarray)):
        model_names = [model_names]
    for i, model_name in enumerate(model_names):
        with open(f'{out_dir}/{model_name}.pkl', 'rb') as f:
            results_individual = pkl.load(f)
        fold_num = 0
        for cv_idx, cv_val_idx in kf.split(cell_nums_train):
            # get sample indices for this fold's validation cells
            idxs_val_cv = cell_nums.isin(cell_nums_train[np.array(cv_val_idx)])
            X_val_cv, Y_val_cv = X[idxs_val_cv], y[idxs_val_cv]

            # get predictions
            preds, preds_proba = analyze_individual_results(results_individual, X_val_cv, Y_val_cv,
                                                            print_results=False, plot_results=False,
                                                            model_cv_fold=fold_num)
            d[f'{model_name}_{fold_num}'] = preds
            d[f'{model_name}_{fold_num}_proba'] = preds_proba
            if i == 0:
                idxs_cv.append(np.arange(X.shape[0])[idxs_val_cv])
            fold_num += 1

    # concatenate over folds
    # (note: the substring matching below assumes no model name is a prefix of another)
    d2 = {}
    for model_name in model_names:
        d2[model_name] = np.hstack([d[k] for k in d.keys() if model_name in k and 'proba' not in k])
        d2[model_name + '_proba'] = np.hstack([d[k] for k in d.keys() if model_name in k and 'proba' in k])
    return pd.DataFrame.from_dict(d2), np.hstack(idxs_cv)
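
# Usage sketch (illustrative): model names and the path are hypothetical; X, y,
# and cell_nums must be row-aligned with the training DataFrame df.
#   df_preds_cv, idxs_cv = get_data_over_folds(
#       ['rf', 'logistic'], 'models/out', df['cell_num'], X, y)
#   labels_cv = df['y_consec_thresh'].iloc[idxs_cv]  # labels matching the cv preds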


def analyze_individual_results(results, X_test, Y_test,
                               print_results=False, plot_results=False, model_cv_fold=0):
    scores_cv = results['cv']
    imps = results['imps']
    m = imps['model'][model_cv_fold]
    preds = m.predict(X_test[results['feat_names_selected']])
    try:
        preds_proba = m.predict_proba(X_test[results['feat_names_selected']])[:, 1]
    except AttributeError:
        # models without predict_proba fall back to hard predictions
        preds_proba = preds

    if print_results:
        print(Fore.CYAN + f'{"metric":<25}\tvalidation')
        for s in results['metrics']:
            if 'curve' not in s:
                print(Fore.WHITE + f'{s:<25}\t{np.mean(scores_cv[s]):.3f} ~ {np.std(scores_cv[s]):.3f}')
        print(Fore.CYAN + '\nfeature importances')
        imp_mat = np.array(imps['imps'])
        imp_mu = imp_mat.mean(axis=0)
        imp_sd = imp_mat.std(axis=0)
        for i, feat_name in enumerate(results['feat_names_selected']):
            print(Fore.WHITE + f'{feat_name:<25}\t{imp_mu[i]:.3f} ~ {imp_sd[i]:.3f}')

    if plot_results:
        plt.figure(figsize=(10, 3), dpi=140)
        R, C = 1, 3
        plt.subplot(R, C, 1)
        viz.plot_confusion_matrix(Y_test, preds, classes=np.array(['Failure', 'Success']))
        plt.subplot(R, C, 2)
        prec, rec, thresh = scores_cv['precision_recall_curve'][0]  # PR curve from the first cv fold
        plt.plot(rec, prec)
        plt.xlim((-0.1, 1.1))
        plt.ylim((-0.1, 1.1))
        plt.ylabel('Precision')
        plt.xlabel('Recall')
        plt.subplot(R, C, 3)
        plt.hist(preds_proba[Y_test == 0], alpha=0.5, label='Failure')
        plt.hist(preds_proba[Y_test == 1], alpha=0.5, label='Success')
        plt.xlabel('Predicted probability')
        plt.ylabel('Count')
        plt.legend()
        plt.tight_layout()
        plt.show()
    return preds, preds_proba
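
# Usage sketch (illustrative): re-score one saved model on held-out data; the
# pickle path is hypothetical.
#   with open('models/out/rf.pkl', 'rb') as f:
#       results = pkl.load(f)
#   preds, preds_proba = analyze_individual_results(
#       results, X_test, Y_test, print_results=True, plot_results=True)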


def load_results_many_models(out_dir, model_names, X_test, Y_test):
    d = {}
    for model_name in model_names:
        with open(oj(out_dir, f'{model_name}.pkl'), 'rb') as f:
            results_individual = pkl.load(f)
        preds, preds_proba = analyze_individual_results(results_individual, X_test, Y_test,
                                                        print_results=False, plot_results=False)
        d[model_name] = preds
        d[model_name + '_proba'] = preds_proba
        d[model_name + '_errs'] = preds != Y_test
    df_preds = pd.DataFrame.from_dict(d)
    return df_preds
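
# Usage sketch (illustrative): compare several saved models on one test set;
# the per-model '_errs' columns are boolean error masks.
#   df_preds = load_results_many_models('models/out', ['rf', 'logistic'], X_test, Y_test)
#   print(df_preds.filter(like='_errs').mean())  # per-model test error rate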


def normalize(df, outcome_def):
    '''Z-score the feature columns of df; return the normalized features X,
    the labels y, and the per-feature normalization constants for reuse.
    '''
    X = df[data.get_feature_names(df)]
    X_mean = X.mean()
    X_std = X.std()
    norms = {k: {'mu': X_mean[k], 'std': X_std[k]} for k in X.keys()}
    X = (X - X_mean) / X_std
    y = df[outcome_def].values
    return X, y, norms
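
# Usage sketch (illustrative): z-score the features once and keep the returned
# norms so new data can be scaled with the same constants; 'lifetime' is a
# hypothetical feature name.
#   X, y, norms = normalize(df, outcome_def='y_consec_thresh')
#   z = (value - norms['lifetime']['mu']) / norms['lifetime']['std']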


def normalize_and_predict(m0, feat_names_selected, dset_name, normalize_by_train,
                          exclude_easy_tracks=False, outcome_def='y_consec_thresh'):
    df_new = data.get_data(dset=dset_name, use_processed=True,
                           use_processed_dicts=True, outcome_def=outcome_def,
                           previous_meta_file=oj(DIR_PROCESSED,
                                                 'metadata_clath_aux+gak_a7d2.pkl'))
    if exclude_easy_tracks:
        df_new = df_new[df_new['valid']]  # exclude test cells, short/long tracks, hotspots

    # impute (only does anything for dynamin data)
    df_new = df_new.fillna(df_new.median())
    X_new = df_new[data.get_feature_names(df_new)]
    if normalize_by_train:
        # X_mean_train / X_std_train are not defined in this module; the
        # caller's scope must supply them (e.g. from a previous normalize call)
        X_new = (X_new - X_mean_train) / X_std_train
    else:
        X_new = (X_new - X_new.mean()) / X_new.std()
    y_new = df_new[outcome_def].values
    preds_new = m0.predict(X_new[feat_names_selected])
    preds_proba_new = m0.predict_proba(X_new[feat_names_selected])[:, 1]
    Y_maxes = df_new['Y_max']
    return df_new, y_new, preds_new, preds_proba_new, Y_maxes
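
# Usage sketch (illustrative): score a fitted model m0 on another dataset; the
# dset name is hypothetical, and normalize_by_train=True additionally requires
# X_mean_train / X_std_train to be defined by the caller.
#   df_new, y_new, preds_new, preds_proba_new, Y_maxes = normalize_and_predict(
#       m0, feat_names_selected, 'clath_aux_dynamin', normalize_by_train=False)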


def calc_errs(preds, y_full_cv):
    '''Return boolean masks for true/false positives and negatives.'''
    tp = np.logical_and(preds == 1, y_full_cv == 1)
    tn = np.logical_and(preds == 0, y_full_cv == 0)
    fp = preds > y_full_cv  # predicted 1, actual 0
    fn = preds < y_full_cv  # predicted 0, actual 1
    return tp, tn, fp, fn
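
# Usage sketch (illustrative): combine with get_data_over_folds to inspect
# misclassified points, e.g. the false positives of one model.
#   tp, tn, fp, fn = calc_errs(df_preds_cv['rf'].values, y_full_cv)
#   df_false_pos = df.iloc[idxs_cv][fp]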