# features.py

import os
from copy import deepcopy
from os.path import join as oj

import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression, RidgeCV

pd.options.mode.chained_assignment = None  # default='warn'; caution: this silences pandas' SettingWithCopyWarning
import pickle as pkl
# from viz import *
import config
from scipy.interpolate import UnivariateSpline
from sklearn.decomposition import DictionaryLearning, NMF
from sklearn import decomposition
import trend_filtering
import data
from scipy.stats import skew, pearsonr
from tqdm import tqdm  # used by add_binary_features

def add_pcs(df):
    '''Adds 10 principal-component features based on the feature names
    '''
    feat_names = data.get_feature_names(df)
    X = df[feat_names]
    X = (X - X.mean()) / X.std()
    pca = decomposition.PCA(whiten=True)
    pca.fit(X[df.valid])
    X_reduced = pca.transform(X)
    for i in range(10):
        df['pc_' + str(i)] = X_reduced[:, i]
    return df

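# Usage sketch (assumes `df` already carries the engineered feature columns and a
# boolean `valid` column, as the function above expects): the call below would append
# columns pc_0 ... pc_9 holding the whitened principal-component projections.
#   df = add_pcs(df)
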
def add_dict_features(df,
                      sc_comps_file=oj(config.DIR_INTERIM, 'dictionaries/sc_12_alpha=1.pkl'),
                      nmf_comps_file=oj(config.DIR_INTERIM, 'dictionaries/nmf_12.pkl'),
                      use_processed=True):
    '''Add features from a saved dictionary (sparse coding + NMF) to df
    '''

    def sparse_code(X_mat, n_comps=12, alpha=1, out_dir=oj(config.DIR_INTERIM, 'dictionaries')):
        print('sparse coding...')
        d = DictionaryLearning(n_components=n_comps, alpha=alpha, random_state=42)
        d.fit(X_mat)
        pkl.dump(d, open(oj(out_dir, f'sc_{n_comps}_alpha={alpha}.pkl'), 'wb'))

    def nmf(X_mat, n_comps=12, out_dir=oj(config.DIR_INTERIM, 'dictionaries')):
        print('running nmf...')
        d = NMF(n_components=n_comps, random_state=42)
        d.fit(X_mat)
        pkl.dump(d, open(oj(out_dir, f'nmf_{n_comps}.pkl'), 'wb'))

    X_mat = extract_X_mat(df)
    X_mat -= np.min(X_mat)

    # if the saved dictionaries don't exist yet, fit and cache them
    if not use_processed or not os.path.exists(sc_comps_file):
        os.makedirs(oj(config.DIR_INTERIM, 'dictionaries'), exist_ok=True)
        sparse_code(X_mat)
        nmf(X_mat)

    try:
        # sparse-coding features
        d_sc = pkl.load(open(sc_comps_file, 'rb'))
        encoding = d_sc.transform(X_mat)
        for i in range(encoding.shape[1]):
            df[f'sc_{i}'] = encoding[:, i]

        # nmf features
        d_nmf = pkl.load(open(nmf_comps_file, 'rb'))
        encoding_nmf = d_nmf.transform(X_mat)
        for i in range(encoding_nmf.shape[1]):
            df[f'nmf_{i}'] = encoding_nmf[:, i]
    except Exception:
        print('dict features not added!')
    return df

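# Usage sketch (assumption: config.DIR_INTERIM points at a writable interim-data
# directory). On the first call the 12-atom sparse-coding and NMF dictionaries are fit
# on extract_X_mat(df) and cached as pickles; later calls just load them and append
# the sc_0..sc_11 and nmf_0..nmf_11 encoding columns.
#   df = add_dict_features(df, use_processed=True)
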
def add_smoothed_splines(df,
                         method='spline',
                         s_spl=0.004):
    X_smooth_spl = []
    X_smooth_spl_dx = []
    X_smooth_spl_d2x = []

    def num_local_maxima(x):
        return len([i for i in range(1, len(x) - 1) if x[i] > x[i - 1] and x[i] > x[i + 1]])

    for x in df['X']:
        spl = UnivariateSpline(x=range(len(x)),
                               y=x,
                               w=[1.0 / len(x)] * len(x),
                               s=np.var(x) * s_spl)
        spl_dx = spl.derivative()
        spl_d2x = spl_dx.derivative()
        X_smooth_spl.append(spl(range(len(x))))
        X_smooth_spl_dx.append(spl_dx(range(len(x))))
        X_smooth_spl_d2x.append(spl_d2x(range(len(x))))
    df['X_smooth_spl'] = np.array(X_smooth_spl)
    df['X_smooth_spl_dx'] = np.array(X_smooth_spl_dx)
    df['X_smooth_spl_d2x'] = np.array(X_smooth_spl_d2x)
    df['X_max_spl'] = np.array([np.max(x) for x in X_smooth_spl])
    df['dx_max_spl'] = np.array([np.max(x) for x in X_smooth_spl_dx])
    df['d2x_max_spl'] = np.array([np.max(x) for x in X_smooth_spl_d2x])
    df['num_local_max_spl'] = np.array([num_local_maxima(x) for x in X_smooth_spl])
    df['num_local_min_spl'] = np.array([num_local_maxima(-1 * x) for x in X_smooth_spl])

    # linear fits to the 5-point track starts / ends
    x = np.arange(5).reshape(-1, 1)
    df['end_linear_fit'] = [LinearRegression().fit(x, end).coef_[0] for end in df['X_ends']]
    df['start_linear_fit'] = [LinearRegression().fit(x, start).coef_[0] for start in df['X_starts']]
    return df

def add_trend_filtering(df):
    df_tf = deepcopy(df)
    for i in range(len(df)):
        df_tf['X'].iloc[i] = trend_filtering.trend_filtering(y=df['X'].iloc[i],
                                                             vlambda=len(df['X'].iloc[i]) * 5,
                                                             order=1)
    # note: add_features is not defined in this file; it is assumed to be provided elsewhere in the project
    df_tf = add_features(df_tf)
    feat_names = data.get_feature_names(df_tf)
    feat_names = [x for x in feat_names
                  if not x.startswith('sc_')
                  and not x.startswith('nmf_')
                  and not x in ['center_max', 'left_max', 'right_max', 'up_max', 'down_max',
                                'X_max_around_Y_peak', 'X_max_after_Y_peak', 'X_max_diff_after_Y_peak',
                                'X_tf']
                  and not x.startswith('pc_')
                  # and not 'local' in x
                  # and not 'X_peak' in x
                  # and not 'slope' in x
                  # and not x in ['fall_final', 'fall_slope', 'fall_imp', 'fall']
                  ]
    for feat in feat_names:
        df[feat + '_tf_smooth'] = df_tf[feat]
    return df

def add_basic_features(df):
    '''Add a bunch of extra features to the df based on df.X, df.X_extended, df.Y, df.lifetime
    '''
    df = df[df.lifetime > 2]
    df['X_max'] = np.array([max(x) for x in df.X.values])
    df['X_max_extended'] = np.array([max(x) for x in df.X_extended.values])
    df['X_min'] = np.array([min(x) for x in df.X.values])
    df['X_mean'] = np.nan_to_num(np.array([np.nanmean(x) for x in df.X.values]))
    df['X_std'] = np.nan_to_num(np.array([np.std(x) for x in df.X.values]))
    df['Y_max'] = np.array([max(y) for y in df.Y.values])
    df['Y_mean'] = np.nan_to_num(np.array([np.nanmean(y) for y in df.Y.values]))
    df['Y_std'] = np.nan_to_num(np.array([np.std(y) for y in df.Y.values]))
    df['X_peak_idx'] = np.nan_to_num(np.array([np.argmax(x) for x in df.X]))
    df['Y_peak_idx'] = np.nan_to_num(np.array([np.argmax(y) for y in df.Y]))
    df['X_peak_time_frac'] = df['X_peak_idx'].values / df['lifetime'].values
    # df['slope_end'] = df.apply(lambda row: (row['X_max'] - row['X'][-1]) / (row['lifetime'] - row['X_peak_idx']),
    #                            axis=1)
    df['X_peak_last_15'] = df['X_peak_time_frac'] >= 0.85
    df['X_peak_last_5'] = df['X_peak_time_frac'] >= 0.95

    # hand-engineered features
    def calc_rise(x):
        '''max change before peak
        '''
        idx_max = np.argmax(x)
        val_max = x[idx_max]
        return val_max - np.min(x[:idx_max + 1])

    def calc_fall(x):
        '''max change after peak
        '''
        idx_max = np.argmax(x)
        val_max = x[idx_max]
        return val_max - np.min(x[idx_max:])

    def calc_rise_slope(x):
        '''slope to max change before peak
        '''
        idx_max = np.argmax(x)
        val_max = x[idx_max]
        x_early = x[:idx_max + 1]
        idx_min = np.argmin(x_early)
        denom = (idx_max - idx_min)
        if denom == 0:
            return 0
        return (val_max - np.min(x_early)) / denom

    def calc_fall_slope(x):
        '''slope to max change after peak
        '''
        idx_max = np.argmax(x)
        val_max = x[idx_max]
        x_late = x[idx_max:]
        idx_min = np.argmin(x_late)
        denom = idx_min
        if denom == 0:
            return 0
        return (val_max - np.min(x_late)) / denom

    def max_diff(x):
        return np.max(np.diff(x))

    def min_diff(x):
        return np.min(np.diff(x))

    df['rise'] = df.apply(lambda row: calc_rise(row['X']), axis=1)
    df['fall'] = df.apply(lambda row: calc_fall(row['X']), axis=1)
    df['rise_extended'] = df.apply(lambda row: calc_rise(row['X_extended']), axis=1)
    df['fall_extended'] = df.apply(lambda row: calc_fall(row['X_extended']), axis=1)
    df['fall_late_extended'] = df.apply(lambda row: row['fall_extended'] if row['X_peak_last_15'] else row['fall'],
                                        axis=1)
    # df['fall_final'] = df.apply(lambda row: row['X'][-3] - row['X'][-1], axis=1)
    df['rise_slope'] = df.apply(lambda row: calc_rise_slope(row['X']), axis=1)
    df['fall_slope'] = df.apply(lambda row: calc_fall_slope(row['X']), axis=1)
    num = 3
    df['rise_local_3'] = df.apply(lambda row:
                                  calc_rise(np.array(row['X'][max(0, row['X_peak_idx'] - num):
                                                              row['X_peak_idx'] + num + 1])),
                                  axis=1)
    df['fall_local_3'] = df.apply(lambda row:
                                  calc_fall(np.array(row['X'][max(0, row['X_peak_idx'] - num):
                                                              row['X_peak_idx'] + num + 1])),
                                  axis=1)
    num2 = 11
    df['rise_local_11'] = df.apply(lambda row:
                                   calc_rise(np.array(row['X'][max(0, row['X_peak_idx'] - num2):
                                                               row['X_peak_idx'] + num2 + 1])),
                                   axis=1)
    df['fall_local_11'] = df.apply(lambda row:
                                   calc_fall(np.array(row['X'][max(0, row['X_peak_idx'] - num2):
                                                               row['X_peak_idx'] + num2 + 1])),
                                   axis=1)
    df['max_diff'] = df.apply(lambda row: max_diff(row['X']), axis=1)
    df['min_diff'] = df.apply(lambda row: min_diff(row['X']), axis=1)

    # imputed feats
    d = df[['X_max', 'X_mean', 'lifetime', 'rise', 'fall']]
    d = d[df['X_peak_time_frac'] <= 0.8]
    # m = RidgeCV().fit(d[['X_max', 'X_mean', 'lifetime', 'rise']], d['fall'])
    # fall_pred = m.predict(df[['X_max', 'X_mean', 'lifetime', 'rise']])
    # fall_imp = df['fall']
    # fall_imp[df['X_peak_time_frac'] > 0.8] = fall_pred[df['X_peak_time_frac'] > 0.8]
    # df['fall_imp'] = fall_imp
    return df

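# Worked example (illustrative values, not from the dataset): for a track
# x = [0, 1, 3, 2, 1] the peak is 3 at index 2, so calc_rise(x) = 3 - 0 = 3,
# calc_fall(x) = 3 - 1 = 2, calc_rise_slope(x) = 3 / 2 = 1.5 and
# calc_fall_slope(x) = 2 / 2 = 1.0 (each slope divides by the index distance from
# the peak to the minimum on its side of the peak).
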
def extract_X_mat(df):
    '''Extract a matrix for X, zero-filled after each sequence ends
    Width of the matrix is the length of the longest lifetime
    '''
    p = df.lifetime.max()
    n = df.shape[0]
    X_mat = np.zeros((n, p)).astype(np.float32)
    X = df['X'].values
    for i in range(n):
        x = X[i]
        num_timepoints = min(p, len(x))
        X_mat[i, :num_timepoints] = x[:num_timepoints]
    X_mat = np.nan_to_num(X_mat)
    X_mat -= np.min(X_mat)
    X_mat /= np.std(X_mat)
    return X_mat

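# Shape sketch (following the code above): for n tracks whose longest lifetime is p
# frames, extract_X_mat returns an (n, p) float32 matrix, zero-padded past each
# track's end, shifted to be non-negative, and scaled by the global standard deviation.
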
def add_binary_features(df, outcome_def):
    '''Binarize each feature at the midpoint between its mean over each outcome class
    '''
    feat_names = data.get_feature_names(df)
    threshes = (df[df[outcome_def] == 1].mean() + df[df[outcome_def] == 0].mean()) / 2
    for i, k in tqdm(enumerate(feat_names)):
        thresh = threshes.loc[k]
        df[k + '_binary'] = df[k] >= thresh
    return df

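# Worked example (illustrative numbers): if a feature averages 4.0 over tracks with
# outcome 1 and 2.0 over tracks with outcome 0, its threshold is (4.0 + 2.0) / 2 = 3.0,
# and the new `<feat>_binary` column is simply `feature >= 3.0`.
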
def add_dasc_features(df, bins=100, by_cell=True):
    """Add DASC features from the Wang et al. 2020 paper

    Parameters:
    df: pd.DataFrame
    bins: int
        number of bins
        default value is 100: the intensity level of clathrin is assigned to 100 equal-length bins
        from vmin (min intensity across all tracks) to vmax (max intensity across all tracks)
    by_cell: bool
        whether to do the binning within each cell
    """
    x_dist = {}
    n = len(df)

    # gather min and max clathrin intensity within each cell
    if by_cell:
        for cell in set(df['cell_num']):
            x = []
            cell_idx = np.where(df['cell_num'].values == cell)[0]
            for i in cell_idx:
                x += df['X'].values[i]
            x_dist[cell] = (min(x), max(x))
    else:
        x = []
        for i in range(n):
            x += df['X'].values[i]
        for cell in set(df['cell_num']):
            x_dist[cell] = (min(x), max(x))

    # transform the clathrin intensity to a bin index between 0 and `bins`
    X_quantiles = []
    for i in range(n):
        r = df.iloc[i]
        cell = r['cell_num']
        X_quantiles.append([int(1.0 * bins * (x - x_dist[cell][0]) / (x_dist[cell][1] - x_dist[cell][0]))
                            if not np.isnan(x) else 0 for x in r['X']])
    df['X_quantiles'] = X_quantiles

    # compute transition probability between different intensities, for different frames
    trans_prob = {}
    tmax = max([len(df['X_quantiles'].values[i]) for i in range(len(df))])
    for t in range(tmax - 1):
        int_pairs = []
        for i in range(n):
            if len(df['X_quantiles'].values[i]) > t + 1:
                int_pairs.append([df['X_quantiles'].values[i][t], df['X_quantiles'].values[i][t + 1]])
        int_pairs = np.array(int_pairs)
        trans_prob_t = {}
        for i in range(bins + 1):
            x1 = np.where(int_pairs[:, 0] == i)[0]
            lower_states_num = np.zeros((i, 2))
            for j in range(len(int_pairs)):
                if int_pairs[j, 0] < i:
                    lower_states_num[int_pairs[j, 0], 0] += 1
                    if int_pairs[j, 1] == i:
                        lower_states_num[int_pairs[j, 0], 1] += 1
            lower_prob = [1. * lower_states_num[k, 1] / lower_states_num[k, 0]
                          for k in range(i) if lower_states_num[k, 0] > 0]
            trans_prob_t[i] = (np.nanmean(int_pairs[x1, 1] < i),
                               # np.nanmean(int_pairs[x1, 1] > i)
                               sum(lower_prob)
                               )
        trans_prob[t] = trans_prob_t

    # compute D sequence
    X_d = [[] for i in range(len(df))]
    for i in range(len(df)):
        for j, q in enumerate(df['X_quantiles'].values[i][:-1]):
            probs = trans_prob[j][q]
            if 0 < probs[0] and 0 < probs[1]:
                X_d[i].append(np.log(probs[0] / probs[1]))
            else:
                X_d[i].append(0)

    # compute features
    d1 = [np.mean(x) for x in X_d]
    d2 = [np.log(max((np.max(x) - np.min(x)) / len(x), 1e-4)) for x in X_d]
    d3 = [skew(x) for x in X_d]
    df['X_d1'] = d1
    df['X_d2'] = d2
    df['X_d3'] = d3
    return df

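# Sketch of the resulting columns (following the code above): X_d holds, per frame,
# the log-ratio of the probability of dropping below the current bin to the summed
# probability of lower bins rising into it; the summary features are X_d1 = mean of
# that sequence, X_d2 = log of its range divided by its length, X_d3 = its skewness.
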
def downsample(x, length, padding='end'):
    """Downsample a (clathrin) track to a fixed length

    Parameters:
    ==========================================================
    x: list
        original clathrin track (tracks have different lengths)
    length: int
        length of track after downsampling
    padding: str
        'end' (default) pads short tracks with zeros at the end; 'front' pads at the front

    Returns:
    ==========================================================
    x_ds: list
        downsampled track
    """
    x = np.array(x)[np.where(np.isnan(x) == False)]
    n = len(x)
    if n >= length:
        # if the original track is longer than the target length, downsample by index
        x_ds = [x[int(1.0 * (n - 1) * i / (length - 1))] for i in range(length)]
    else:
        # if the original track is shorter than the target length, fill the track with 0s
        if padding == 'front':
            x_ds = [0] * (length - len(x)) + list(x)
        else:
            x_ds = list(x) + [0] * (length - len(x))
    return x_ds

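# Worked example (illustrative values): downsample([1, 2, 3, 4, 5, 6], 4) keeps the
# points at indices int(5 * i / 3) for i = 0..3, i.e. indices 0, 1, 3, 5 -> [1, 2, 4, 6];
# downsample([1, 2], 4) pads instead, giving [1, 2, 0, 0] (or [0, 0, 1, 2] with padding='front').
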
def downsample_video(x, length):
    """Downsample a video feature in the same way as `downsample`
    """
    n = len(x)
    if n >= length:
        # if the original video is longer than the target length, downsample by index
        time_index = [int(1.0 * (n - 1) * i / (length - 1)) for i in range(length)]
        x_ds = x[time_index, :, :]
    elif n > 0:
        # if the original video is shorter than the target length, fill with 10x10 zero frames
        x_ds = np.vstack((x, np.zeros((length - n, 10, 10))))
    else:
        x_ds = np.zeros((40, 10, 10))
    return x_ds

def normalize_track(df, track='X_same_length', by_time_point=True):
    """Normalize tracks within each cell (per time point by default)
    """
    df[f'{track}_normalized'] = df[track].values
    for cell in set(df['cell_num']):
        cell_idx = np.where(df['cell_num'].values == cell)[0]
        y = df[track].values[cell_idx]
        y = np.array(list(y))
        if by_time_point:
            df[f'{track}_normalized'].values[cell_idx] = list((y - np.mean(y, axis=0)) / np.std(y, axis=0))
        else:
            df[f'{track}_normalized'].values[cell_idx] = list((y - np.mean(y)) / np.std(y))
    return df

def normalize_feature(df, feat):
    """Normalize a scalar feature within each cell
    """
    df = df.astype({feat: 'float64'})
    for cell in set(df['cell_num']):
        cell_idx = np.where(df['cell_num'].values == cell)[0]
        y = df[feat].values[cell_idx]
        # y = np.array(list(y))
        df[feat].values[cell_idx] = (y - np.nanmean(y)) / np.nanstd(y)
    return df

def normalize_video(df, video='X_video'):
    """Normalize videos (different frames are normalized separately)

    e.g. to normalize the first frame, we take the first frame of all videos,
    flatten and concatenate them into one 1-d array, and extract the mean and std
    """
    df[f'{video}_normalized'] = df[video].values
    for cell in set(df['cell_num']):
        cell_idx = np.where(df['cell_num'].values == cell)[0]
        y = df[video].values[cell_idx]
        video_shape = y[0].shape
        video_mean, video_std = np.zeros(video_shape), np.zeros(video_shape)
        for j in range(video_shape[0]):
            all_frames_j = np.array([y[i][j].reshape(1, -1)[0] for i in range(len(y))]).reshape(1, -1)[0]
            video_mean[j] = np.mean(all_frames_j) * np.ones((video_shape[1], video_shape[2]))
            video_std[j] = np.std(all_frames_j) * np.ones((video_shape[1], video_shape[2]))
        df[f'{video}_normalized'].values[cell_idx] = list((list(y) - video_mean) / video_std)
    return df
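

if __name__ == '__main__':
    # Minimal smoke-test sketch (an illustration, not part of the original pipeline):
    # builds a tiny synthetic DataFrame with the columns add_basic_features expects
    # (X, X_extended, Y, lifetime) and prints a few of the engineered features.
    toy = pd.DataFrame({
        'X': [[0., 1., 3., 2., 1.], [1., 2., 5., 4., 0.]],
        'X_extended': [[0., 0., 1., 3., 2., 1., 1.], [0., 1., 2., 5., 4., 0., 0.]],
        'Y': [[0., 1., 2., 1., 0.], [0., 2., 3., 1., 0.]],
        'lifetime': [5, 5],
    })
    toy = add_basic_features(toy)
    print(toy[['X_max', 'rise', 'fall', 'rise_slope', 'fall_slope']])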