# outcomes.py
import numpy as np
import pandas as pd
from math import floor

pd.options.mode.chained_assignment = None  # default='warn'; caution: this silences pandas' SettingWithCopyWarning
from viz import *
import config

def add_rule_based_label(df):
    df['Y_peak_time_frac'] = df['Y_peak_idx'].values / df['lifetime'].values
    df['y_z_score'] = (df['Y_max'].values - df['Y_mean'].values) / df['Y_std'].values
    X_max_around_Y_peak = []
    X_max_after_Y_peak = []
    for i in range(len(df)):
        pt = df['Y_peak_idx'].values[i]
        lt = floor(df['lifetime'].values[i])
        left_bf = int(0.2 * lt) + 1  # together these give a window of length ~30% * lifetime
        right_bf = int(0.1 * lt) + 1
        arr_around = df['X'].iloc[i][max(0, pt - left_bf): min(pt + right_bf, lt)]
        arr_after = df['X'].iloc[i][min(pt + right_bf, lt - 1):]
        X_max_around_Y_peak.append(max(arr_around))
        if len(arr_after) > 0:
            X_max_after_Y_peak.append(max(arr_after))
        else:
            X_max_after_Y_peak.append(max(arr_around))
    df['X_max_around_Y_peak'] = X_max_around_Y_peak
    df['X_max_after_Y_peak'] = X_max_after_Y_peak
    df['X_max_diff'] = df['X_max_around_Y_peak'] - df['X_max_after_Y_peak']

    def rule_based_model(track):
        # three rules:
        # if aux peaks too early -- negative
        # elif y_consec_sig or y_conservative_thresh or (clathrin drops around the aux peak,
        #      and aux max is greater than mean + 2.6 * std) -- positive
        # else -- negative
        if track['Y_peak_time_frac'] < 0.2:
            return 0
        if track['y_consec_sig'] or track['y_conservative_thresh']:
            return 1
        # if track['X_max_diff'] > 260 and track['y_z_score'] > 2.6:
        #     return 1
        if track['X_max_diff'] > 260 and track['Y_max'] > 560:
            return 1
        return 0

    df['y_rule_based'] = np.array([rule_based_model(df.iloc[i]) for i in range(len(df))])
    return df
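

# A hypothetical illustration (not part of the original module) of the peak-window
# arithmetic in add_rule_based_label: for a track with lifetime 20 and auxilin peak
# at index 10, the "around" window covers 20% of the lifetime (plus one step) before
# the peak and 10% (plus one step) after it.
def _demo_peak_window(pt=10, lt=20):
    left_bf = int(0.2 * lt) + 1   # 5 steps before the peak
    right_bf = int(0.1 * lt) + 1  # 3 steps after the peak
    return max(0, pt - left_bf), min(pt + right_bf, lt)  # -> (5, 13)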


def add_outcomes(df, LABELS=None, thresh=3.25, p_thresh=0.05,
                 aux_peak=642.375, aux_thresh=973, vps_data=False):
    '''Add binary outcome of whether a spike happened and info on whether events were questionable
    '''
    df['y_score'] = df['Y_max'].values - (df['Y_mean'].values + thresh * df['Y_std'].values)
    df['y_thresh'] = (df['y_score'].values > 0).astype(int)  # Y_max was big
    df['y'] = df['Y_max'] > aux_peak

    # outcomes based on significant p-values
    num_sigs = [np.array(df['Y_pvals'].iloc[i]) < p_thresh for i in range(df.shape[0])]
    df['y_num_sig'] = np.array([num_sigs[i].sum() for i in range(df.shape[0])]).astype(int)
    df['y_single_sig'] = np.array([num_sigs[i].sum() > 0 for i in range(df.shape[0])]).astype(int)
    df['y_double_sig'] = np.array([num_sigs[i].sum() > 1 for i in range(df.shape[0])]).astype(int)
    df['y_conservative_thresh'] = (df['Y_max'].values > aux_thresh).astype(int)
    y_consec_sig = []
    y_sig_min_diff = []
    for i in range(df.shape[0]):
        idxs_sig = np.where(num_sigs[i] == 1)[0]  # indices of significance
        if len(idxs_sig) > 1:
            y_sig_min_diff.append(np.min(np.diff(idxs_sig)))
        else:
            y_sig_min_diff.append(np.nan)
        # find whether there were consecutive significant indices,
        # e.g. significant time steps [3, 4, 9] have a min diff of 1, so they count
        if len(idxs_sig) > 1 and np.min(np.diff(idxs_sig)) == 1:
            y_consec_sig.append(1)
        else:
            y_consec_sig.append(0)
    df['y_consec_sig'] = y_consec_sig
    df['y_sig_min_diff'] = y_sig_min_diff
    df['y_consec_thresh'] = np.logical_or(df['y_consec_sig'], df['y_conservative_thresh'])

    def add_hotspots(df, num_sigs, outcome_def='consec_sig'):
        '''Identify hotspots as any track which over its time course has multiple events.
        Events must meet the event definition, then for a time not meet it, then meet it again.
        Example: two consecutive significant p-values, then a non-significant p-value,
        then two more consecutive significant p-values
        '''
        if outcome_def != 'consec_sig':
            raise ValueError(f'unknown outcome_def: {outcome_def}')  # previously fell through with hotspots undefined
        hotspots = np.zeros(df.shape[0]).astype(int)
        for i in range(df.shape[0]):
            idxs_sig = np.where(num_sigs[i] == 1)[0]  # indices of significance
            if idxs_sig.size < 5:
                hotspots[i] = 0
            else:
                diffs = np.diff(idxs_sig)
                consecs = np.where(diffs == 1)[0]  # diffs == 1 means there were consecutive sigs
                consec_diffs = np.diff(consecs)
                if consec_diffs.shape[0] > 0 and np.max(consec_diffs) > 2:
                    # there were greater than 2 non-consec sigs between the consec sigs
                    hotspots[i] = 1
                else:
                    hotspots[i] = 0
        df['sig_idxs'] = num_sigs
        df['hotspots'] = hotspots == 1
        return df

    df = add_hotspots(df, num_sigs)

    if LABELS is not None:
        df.loc[df.pid.isin(LABELS['pos']), 'y_consec_thresh'] = 1  # add manual pos labels
        df.loc[df.pid.isin(LABELS['neg']), 'y_consec_thresh'] = 0  # add manual neg labels
        df.loc[df.pid.isin(LABELS['hotspots']), 'hotspots'] = True  # add manual hotspot labels
    if not vps_data:
        df = add_rule_based_label(df)
    return df
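

# A hypothetical, minimal sketch (not from the original pipeline) of the per-track
# columns add_outcomes() reads: Y_max / Y_mean / Y_std summary statistics and a list
# of per-time-step p-values in Y_pvals. All values here are made up; vps_data=True
# skips the rule-based labeling, which would need extra columns (X, Y_peak_idx, lifetime).
def _demo_add_outcomes():
    tracks = pd.DataFrame({
        'Y_max': [1200., 300.],
        'Y_mean': [400., 250.],
        'Y_std': [150., 40.],
        'Y_pvals': [[0.2, 0.01, 0.02, 0.3], [0.4, 0.5, 0.6, 0.7]],
    })
    return add_outcomes(tracks, vps_data=True)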


def add_sig_mean(df, resp_tracks=['Y']):
    """Add response of regression problem: mean auxilin strength among significant observations
    """
    for track in resp_tracks:
        sig_mean = []
        for i in range(len(df)):
            r = df.iloc[i]
            sigs = np.array(r[f'{track}_pvals']) < 0.05
            if sum(sigs) > 0:
                sig_mean.append(np.mean(np.array(r[track])[sigs]))
            else:
                sig_mean.append(0)
        df[f'{track}_sig_mean'] = sig_mean
        # z-score within each cell so intensities are comparable across cells
        # (build the array locally instead of writing through .values, which can
        # silently fail under pandas copy-on-write)
        normalized = np.array(sig_mean, dtype=float)
        for cell in set(df['cell_num']):
            cell_idx = np.where(df['cell_num'].values == cell)[0]
            y = normalized[cell_idx]
            normalized[cell_idx] = (y - np.mean(y)) / np.std(y)
        df[f'{track}_sig_mean_normalized'] = normalized
    return df
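

# Another hypothetical sketch (values made up): add_sig_mean() needs the raw track
# in 'Y', its p-values in 'Y_pvals', and a 'cell_num' column for the per-cell
# normalization above.
def _demo_add_sig_mean():
    tracks = pd.DataFrame({
        'Y': [np.array([500., 900., 300.]), np.array([200., 250., 220.])],
        'Y_pvals': [[0.01, 0.02, 0.5], [0.3, 0.4, 0.5]],
        'cell_num': [1, 1],
    })
    return add_sig_mean(tracks)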


def add_aux_dyn_outcome(df, p_thresh=0.05, clath_thresh=1500, dyn_thresh=2000,
                        dyn_cons_thresh=5, clath_sig_frac=0.5, clath_consec_thresh_frac=0.15):
    """Add outcomes based on clathrin (X) significance and, when present, the dynamin (Z) peak
    """
    # look for clathrin significance
    num_sigs = [np.array(df['X_pvals'].iloc[i]) < p_thresh for i in range(df.shape[0])]
    x_consec_sig = []
    x_frac_sig = []
    lifetime_steps = np.array([len(df['X'].iloc[i]) for i in range(df.shape[0])])  # get lifetimes
    for i in range(df.shape[0]):
        l = lifetime_steps[i]
        sigs = num_sigs[i]
        x_frac_sig.append(np.mean(sigs) >= clath_sig_frac)
        # scan for a run of consecutive significant steps at least
        # clath_consec_thresh_frac of the lifetime long, and no shorter than 5 steps
        cons = 0
        consec_flag = False
        for j in range(len(sigs)):
            if sigs[j] == 1:
                cons += 1
            else:
                cons = 0
            if cons >= max(l * clath_consec_thresh_frac, 5):
                consec_flag = True
                break
        if consec_flag:
            x_consec_sig.append(1)
        else:
            x_consec_sig.append(0)

    # outcomes based on significant p-values
    df['clath_conservative_thresh'] = (df['X_max'].values > clath_thresh).astype(int)
    df['clath_sig'] = np.logical_and(x_consec_sig, x_frac_sig)
    df['successful'] = np.logical_and(df['y_consec_thresh'], df['clath_conservative_thresh'])
    df['successful_dynamin'] = df['successful']
    df['successful_full'] = np.logical_and(df['clath_sig'], df['successful_dynamin'])

    # look for a dynamin peak
    if 'Z' in df.keys():
        num_sigs = [np.array(df['Z_pvals'].iloc[i]) < p_thresh for i in range(df.shape[0])]
        z_consec_sig = []
        for i in range(df.shape[0]):
            sigs = num_sigs[i]
            cons = 0
            consec_flag = False
            for j in range(len(sigs)):
                if sigs[j] == 1:
                    cons += 1
                else:
                    cons = 0
                if cons >= dyn_cons_thresh:
                    consec_flag = True
                    break
            if consec_flag:
                z_consec_sig.append(1)
            else:
                z_consec_sig.append(0)
        df['z_consec_sig'] = z_consec_sig
        df['Z_max'] = [np.max(df.iloc[i]['Z']) for i in range(df.shape[0])]
        df['z_thresh'] = df['Z_max'] > dyn_thresh
        df['z_consec_thresh'] = np.logical_and(df['z_consec_sig'], df['z_thresh'])
        df['Y_peak_idx'] = np.nan_to_num(np.array([np.argmax(y) for y in df.Y]))
        df['Z_peak_idx'] = np.nan_to_num(np.array([np.argmax(z) for z in df.Z]))
        df['z_peaked_first'] = df['Z_peak_idx'] < df['Y_peak_idx']
        df['z_peak'] = np.logical_and(df['z_consec_thresh'], df['z_peaked_first'])
        # the dynamin peak must also fall in the second half of the track
        df['z_peak'] = np.logical_and(df['z_peak'], df['Z_peak_idx'] > lifetime_steps / 2)
        # a dynamin peak can rescue a track that missed the auxilin criterion
        df['successful_dynamin'] = np.logical_or(
            df['successful'],
            np.logical_and(df['clath_conservative_thresh'], df['z_peak'])
        )
        df['successful_full'] = np.logical_and(df['clath_sig'], df['successful_dynamin'])

    # add more manual labels
    df.loc[df.pid.isin(config.LABELS_DYNAMIN_NEW['pos']), 'successful_full'] = 1
    df.loc[df.pid.isin(config.LABELS_DYNAMIN_NEW['neg']), 'successful_full'] = 0
    df.loc[df.pid.isin(config.LABELS_DYNAMIN_NEW['hotspots']), 'hotspots'] = True
    return df
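

# The _demo_* functions above are hypothetical usage sketches, not part of the
# original pipeline; running this file directly smoke-tests them.
if __name__ == '__main__':
    print(_demo_peak_window())
    print(_demo_add_outcomes()[['y', 'y_consec_sig', 'y_consec_thresh', 'hotspots']])
    print(_demo_add_sig_mean()[['Y_sig_mean', 'Y_sig_mean_normalized']])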