predict_current_BGL.py
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# research question: I want to predict the current BGL, every 5 min, without using any past BGL information.
#
# variables: only the main ones 'glucose', 'basal', 'CHO', 'insulin'
# %% [markdown]
#
# %%
# loading imports ---------------
import GlucoNet_Loading
import numpy as np
import pandas as pd  # NOTE: DataFrame.append / Series.append are used below; they were removed in pandas 2.0, so pandas 1.x is assumed
import os
# processing imports ------------
from Proc_func import col_to_check, checkCarb, dummyCarbs, create_samples_V2, extract_data, get_valid_df
from datetime import timedelta, datetime
# modelling imports --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import explained_variance_score, max_error, mean_absolute_error, mean_squared_error, r2_score
# %% [markdown]
# Loading Data.
# The cell below creates 4 dictionaries. Each key-value pair consists of
# key = id number of the patient
# value = associated pandas dataframe
# There are 12 patients in total.
# The 2 dictionaries all_df_train_stage1 and all_df_test_stage1 contain 6 training and 6 testing dataframes respectively, for six patients.
# The 2 dictionaries all_df_train_stage2 and all_df_test_stage2 contain 6 training and 6 testing dataframes respectively, for the remaining six patients.
#
# path = path to the folders containing the data to load. The loading process takes care of the correct transformation from xml to pandas df.
# %%
path = str(os.getcwd())  # + '/datasets/' ATTENTION: currently the datasets folder must be in the same path as the GlucoNet_Loading.py file
print(path)
all_df_train_stage1 = GlucoNet_Loading.parse_directory(path, 'OhioT1DM-training', sys='mac')
all_df_test_stage1 = GlucoNet_Loading.parse_directory(path, 'OhioT1DM-testing', sys='mac')
all_df_train_stage2 = GlucoNet_Loading.parse_directory(path, 'OhioT1DM-2-training', sys='mac')
all_df_test_stage2 = GlucoNet_Loading.parse_directory(path, 'OhioT1DM-2-testing', sys='mac')
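# %% [markdown]
# GlucoNet_Loading.parse_directory is imported and not shown here. Below is a minimal sketch of the
# xml -> dataframe step it presumably performs for a single field, assuming the usual OhioT1DM layout of
# <glucose_level><event ts="..." value="..."/></glucose_level>. Tag names, attribute names and the
# day-first timestamp format are assumptions, not the actual GlucoNet_Loading implementation.
# %%
import xml.etree.ElementTree as ET

def parse_glucose_sketch(xml_path):
    # hypothetical helper: collect (timestamp, glucose) pairs from one patient's xml file
    root = ET.parse(xml_path).getroot()
    rows = [(ev.get('ts'), float(ev.get('value'))) for ev in root.find('glucose_level')]
    df = pd.DataFrame(rows, columns=['datetime', 'glucose'])
    df['datetime'] = pd.to_datetime(df['datetime'], dayfirst=True)
    return df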
# %% [markdown]
# Variable validation.
#
# %%
data = all_df_train_stage1.get('559')  # pid number must be string
data.columns
# %%
var_used_4_current_hyp = ["datetime", 'glucose', 'basal',
                          'CHO', 'insulin', 'basis_heart_rate', 'basis_gsr', 'basis_skin_temperature',
                          'basis_air_temperature', 'basis_steps']
pid_to_remove1, pid_to_remove2 = col_to_check(var_used_4_current_hyp, dict1=all_df_train_stage1, dict2=all_df_test_stage1, dict3=all_df_train_stage2, dict4=all_df_test_stage2)
valid_pid_stage1, valid_pid_stage2 = get_valid_df(pid_to_remove1=pid_to_remove1, pid_to_remove2=pid_to_remove2)
# %%
data = data[var_used_4_current_hyp]
data
# %%
def resample(data, freq):  # TODO: new rule - consider the imputation logic as if I must try different resamplings
    """
    :param data: dataframe
    :param freq: sampling frequency
    :return: data resampled at frequency freq, between the first day at 00:00:00 and the last day at 23:(60-freq):00
    sum imputes 0 when a value is missing
    mean imputes nan when a value is missing
    """
    start = data.datetime.iloc[0].strftime('%Y-%m-%d') + " 00:00:00"
    end = datetime.strptime(data.datetime.iloc[-1].strftime('%Y-%m-%d'), "%Y-%m-%d") + timedelta(days=1) - timedelta(minutes=freq)
    index = pd.period_range(start=start,
                            end=end,
                            freq=str(freq) + 'min').to_timestamp()
    data = data.resample(str(freq) + 'min', on="datetime").agg(
        {'glucose': np.mean, 'basal': np.mean, 'CHO': np.sum, 'insulin': np.sum,
         'basis_heart_rate': np.mean, 'basis_gsr': np.mean, 'basis_skin_temperature': np.mean,
         'basis_air_temperature': np.mean, 'basis_steps': np.sum})
    data = data.reindex(index=index)
    data = data.reset_index()
    data = data.rename(columns={"index": "datetime"})
    return data

data_resampled = resample(data, 5)
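# %% [markdown]
# A quick synthetic check of the resampling behaviour described in the docstring (illustrative only;
# the tiny frame below is made-up data, not part of the dataset): within the covered time range,
# sum-aggregated columns like CHO get 0 in empty 5-min slots, while mean-aggregated columns like
# glucose get NaN.
# %%
_demo = pd.DataFrame({
    'datetime': pd.to_datetime(['2021-01-01 00:02:00', '2021-01-01 00:13:00']),
    'glucose': [100.0, 110.0], 'basal': [1.0, 1.0], 'CHO': [np.nan, 20.0], 'insulin': [0.0, 0.0],
    'basis_heart_rate': [70.0, 72.0], 'basis_gsr': [1.0, 1.0], 'basis_skin_temperature': [33.0, 33.0],
    'basis_air_temperature': [25.0, 25.0], 'basis_steps': [0.0, 10.0]})
resample(_demo, 5).head(4)  # row 00:05 has CHO 0.0 but glucose NaN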
# %%
def fill_na(df):
    # placeholder: no imputation is applied yet, the dataframe is returned unchanged
    df = df.copy(deep=True)
    return df

data_filled = fill_na(data_resampled)
# %%
def data_interpolation(df, method, order, limit):
    """
    a limit value must be present in order to keep all interpolated glucose values positive
    """
    df = df.copy(deep=True)
    df["glucose"].interpolate(method=method, order=order, inplace=True, limit=limit)
    df["basis_gsr"].interpolate(method=method, order=order, inplace=True, limit=limit)
    df["basis_heart_rate"].interpolate(method=method, order=order, inplace=True, limit=limit)
    df["basis_skin_temperature"].interpolate(method=method, order=order, inplace=True, limit=limit)
    return df

data_interpolated = data_interpolation(data_filled, method="polynomial", order=1, limit=4)
# %% [markdown]
# Feature Engineering.
# This is a substantial part. As an example, I'll create a new feature using a function, mealZone.
# mealZone, and eventually all the other feature engineering steps, must be called by the feature_eng function, which makes it possible to apply the same operations to all datasets automatically.
# %%
def mealZone2(df, before, after):
    """
    create a new column mealZone with value 1 if the observation falls within `before` periods before
    or `after` periods after a meal (assuming that the resampling is every 5 minutes).
    it is interesting to try before=0 to make explicit the fact that glucose is being processed for a window after a meal
    df = pandas dataframe object
    before = int = how many periods before the CHO timestamp to consider mealzone
    after = int = how many periods after the CHO timestamp to consider mealzone
    """
    df = df.copy(deep=True)
    mealZone = dummyCarbs(df).values
    mealIndex = np.nonzero(mealZone)[0]
    extendedMealIndex = []
    for i in mealIndex:
        to_append = np.linspace(i - before, i + after, after + before + 1, dtype=int)
        to_append = np.clip(to_append, 0, len(mealZone) - 1)  # keep indices in range: avoids out-of-bounds errors and negative-index wrap-around near the edges of the series
        extendedMealIndex.append(to_append)
    okExtendedIndex = []
    for sublist in extendedMealIndex:
        for element in sublist:
            okExtendedIndex.append(element)
    mealZone[okExtendedIndex] = 1
    df["mealZone" + str(before) + '-' + str(after)] = mealZone
    return df

def feature_eng(df, mealzone=False):
    # the mealzone flag is currently unused
    df = df.copy(deep=True)
    df = mealZone2(df, 0, 16)
    # df = mealZone2(df, -8, 24)  # TODO: fix bug
    df['hour'] = df['datetime'].dt.hour
    df['minute'] = df['datetime'].dt.minute
    return df

data_feature_added = feature_eng(data_interpolated, mealzone=True)
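# %% [markdown]
# mealZone2 relies on the imported dummyCarbs. A plausible stand-in (an assumption, not the imported
# implementation): a 0/1 series flagging the rows where a carbohydrate intake is recorded.
# %%
def dummyCarbs_sketch(df):
    # hypothetical: 1 where CHO is present and non-zero, 0 elsewhere
    return (df['CHO'].fillna(0) != 0).astype(int)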
# %% [markdown]
# Additional manipulations can be tested here and then added to the processing function. One additional manipulation that is nearly always present is selecting a subset of the variables, or the transformation to a categorical y.
# %%
def additional_manipulation(df):
    df = df.copy(deep=True)
    df['basal'].fillna(method='ffill', inplace=True)
    return df

data_manipulated = additional_manipulation(data_feature_added)
data_manipulated.columns
# %% [markdown]
# Sample creation.
# With the function create_samples_V2, the final training samples are created using a window approach (a minimal illustrative sketch follows the cell below).
# Since the function is always the same, it is not reported here but imported.
#
# TODO: insert link detailing the window approach
# %%
data_sampled = create_samples_V2(data_manipulated, number_lags=12, colonne_da_laggare=[], colonna_Y='glucose', pred_horizon=0)
data_sampled.dropna(inplace=True)
data_sampled.drop('glucose_t', axis=1, inplace=True)
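# %% [markdown]
# A minimal sketch of the window/lag idea behind create_samples_V2 (illustrative only, not the imported
# implementation, which also handles the prediction horizon and the *_t column naming): each column in
# cols_to_lag is shifted by 1..n_lags periods, so every row carries its recent history as extra features.
# %%
def make_lagged_features_sketch(df, cols_to_lag, n_lags):
    # hypothetical helper, for illustration only
    out = df.copy(deep=True)
    for col in cols_to_lag:
        for lag in range(1, n_lags + 1):
            out[col + '_t-' + str(lag)] = out[col].shift(lag)
    return out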
# %% [markdown]
# This function simply splits the data into X and y. Despite its name, it works for testing data as well as training data.
# %%
x, y = extract_data(data_sampled, 0)
x
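# %% [markdown]
# extract_data is imported and not shown. Judging from the call extract_data(data_sampled, 0), a plausible
# stand-in (an assumption, not the imported implementation, and the meaning of the second argument is not
# documented here) is:
# %%
def extract_data_sketch(df, pred_horizon=0):
    # hypothetical: the target is the glucose column, the features are everything else except datetime
    y = df['glucose']
    x = df.drop(columns=['glucose', 'datetime'], errors='ignore')
    return x, y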
# %% [markdown]
# Since the sample creation using the window approach generates new features, the "final_x_manipulation" step aggregates all the operations that result in the final structure of the feature dataset (the X_train/test structure).
# %%
def final_x_manipulation(df):
    # placeholder: no final manipulation is applied yet
    return df

x = final_x_manipulation(x)
x.columns
# %% [markdown]
# Processing function:
# here all the functions detailed above are put together, in order to apply the same process to all training and test data.
# %%
def processing(data, vars):
    """
    vars = list of vars of the current hypothesis
    """
    data = data[vars]
    data_resampled = resample(data, 5)
    data_filled = fill_na(data_resampled)
    data_interpolated = data_interpolation(data_filled, method="polynomial", order=1, limit=4)
    data_feature_added = feature_eng(data_interpolated, mealzone=True)
    data_manipulated = additional_manipulation(data_feature_added)
    data_sampled = create_samples_V2(data_manipulated, number_lags=0,
                                     colonne_da_laggare=['basal', 'CHO', 'insulin', 'basis_heart_rate',
                                                         'basis_gsr', 'basis_skin_temperature', 'basis_air_temperature',
                                                         'basis_steps', 'mealZone0-16', 'hour', 'minute'],
                                     colonna_Y='glucose', pred_horizon=0)
    data_sampled.drop('glucose_t', axis=1, inplace=True)
    data_sampled.dropna(inplace=True)
    x, y = extract_data(data_sampled, 0)
    x = final_x_manipulation(x)
    return x, y

# if "True True" is printed, the processing() function works as intended
xp, yp = processing(data, var_used_4_current_hyp)
print(xp.equals(x), yp.equals(y))
# %% [markdown]
# Modelling.
# %% [markdown]
# accuracy_measure is a function that takes ytest and ypred as arguments and returns a dataframe.
# This dataframe must have the different accuracy measures as columns and a single row containing the result for each measure.
# %%
def accuracy_measure(ytest, predictions):  # TODO: add cod patient to index name
    columns_names = ['evs', 'me', 'mae', 'rmse', 'r2']
    metrics_values = [explained_variance_score(ytest, predictions),
                      max_error(ytest, predictions),
                      mean_absolute_error(ytest, predictions),
                      mean_squared_error(ytest, predictions, squared=False),
                      r2_score(ytest, predictions)]
    acc_measure_df = pd.DataFrame(columns=columns_names)
    acc_measure_df.loc[1] = metrics_values  # maybe I can parametrize the loc value?
    return acc_measure_df
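# %% [markdown]
# A quick sanity check on toy values (illustrative only, not dataset results): the returned frame has
# one row and the five metric columns.
# %%
accuracy_measure(pd.Series([100.0, 120.0, 140.0]), np.array([110.0, 118.0, 150.0]))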
# %% [markdown]
# Find a model: here is the space for experimenting and finding the best model to then pass into the modelling function.
# %%
# scikit-learn 0.23.2 is needed - I also have to install pycaret and then all the other packages in the environment
# exp_reg001 = setup(data = caret_data, target = 'target', fold_shuffle=True, session_id=2, imputation_type='iterative')
# best = compare_models(exclude = ['ransac'])
# %%
# %% [markdown]
# The modelling function specifies the model and all the steps that generate the final ypred values.
# The inputs should be the patient's id and xtrain, xtest, ytrain, ytest.
# Two objects are returned:
# res_y_ypred, a dataframe containing all ytest and ypred values, used in later operations
# acc_measure_df, the dataframe containing the results. Again, one column for every measure and one row containing all the values.
# %%
def modelling(xtrain, xtest, ytrain, ytest, cod_patient):  # mind the order of the arguments
    model = LinearRegression()
    model = model.fit(xtrain, ytrain)
    predictions = model.predict(xtest)
    acc_measure_df = accuracy_measure(ytest, predictions)  # TODO: add index name as pid
    res_y_ypred = pd.DataFrame({'ytest': ytest, 'pred': predictions})
    return res_y_ypred, acc_measure_df
# %%
data = all_df_train_stage1.get('559')
xtrain, ytrain = processing(data, var_used_4_current_hyp)
data = all_df_test_stage1.get('559')
xtest, ytest = processing(data, var_used_4_current_hyp)
res_y_ypred, acc_measure_df = modelling(xtrain, xtest, ytrain, ytest, '559')
# %% [markdown]
# Getting results.
# In the cells above it is possible to test the procedure with one patient. Below are the functions for automatically experimenting on all patients.
#
# With get_single_results_stage1/2(pid) it is possible to quickly get results for a single pid specified directly in the function.
#
# Instead of running get_single_results_stage1/2(pid), it is possible to run recursive_get_single_result() just once and get all the results.
#
# The function recursive_conglobate_get_single_result is used to see how well the models generalise to unseen patients. Out of the total of n valid pids, the training is done on the training and testing data of n-1 patients. One can then decide whether or not to also include the training data of the held-out patient in the big training dataset. This function cycles through all n patients.
# %%
def get_single_results_stage1(pid, valid_vars=var_used_4_current_hyp):
    pid = str(pid)
    data = all_df_train_stage1.get(pid)
    xtrain, ytrain = processing(data, valid_vars)
    data = all_df_test_stage1.get(pid)
    xtest, ytest = processing(data, valid_vars)
    res_y_ypred, acc_measure_df = modelling(xtrain, xtest, ytrain, ytest, pid)
    return res_y_ypred, acc_measure_df

def get_single_results_stage2(pid, valid_vars=var_used_4_current_hyp):
    pid = str(pid)
    data = all_df_train_stage2.get(pid)
    xtrain, ytrain = processing(data, valid_vars)
    data = all_df_test_stage2.get(pid)
    xtest, ytest = processing(data, valid_vars)
    res_y_ypred, acc_measure_df = modelling(xtrain, xtest, ytrain, ytest, pid)
    return res_y_ypred, acc_measure_df

# %%
res_y_ypred, acc_measure_df = get_single_results_stage1(pid='559', valid_vars=var_used_4_current_hyp)
acc_measure_df
# %%
def recursive_get_single_result(valid_pid_stage1=valid_pid_stage1, valid_pid_stage2=valid_pid_stage2, valid_vars=var_used_4_current_hyp):
    """
    get a separate result from each and every patient.
    for each pid, the training is done on the corresponding train set
    and the testing on the test set
    """
    res_y_ypred_tot = pd.DataFrame(columns=['ytest', 'pred'])
    acc_measure_df_tot = pd.DataFrame()
    for i in valid_pid_stage1:
        res_y_ypred, acc_measure_df = get_single_results_stage1(pid=str(i), valid_vars=valid_vars)
        res_y_ypred_tot = res_y_ypred_tot.append(res_y_ypred)
        acc_measure_df_tot = pd.concat([acc_measure_df_tot, acc_measure_df], axis=0)
    for i in valid_pid_stage2:
        res_y_ypred, acc_measure_df = get_single_results_stage2(pid=str(i), valid_vars=valid_vars)
        res_y_ypred_tot = res_y_ypred_tot.append(res_y_ypred)
        acc_measure_df_tot = pd.concat([acc_measure_df_tot, acc_measure_df], axis=0)
    acc_measure_df_tot.index = valid_pid_stage1 + valid_pid_stage2
    return res_y_ypred_tot, acc_measure_df_tot

# %%
res_y_ypred_tot, acc_measure_df_tot = recursive_get_single_result(valid_pid_stage1=valid_pid_stage1, valid_pid_stage2=valid_pid_stage2, valid_vars=var_used_4_current_hyp)
acc_measure_df_tot
# %%
def recursive_conglobate_get_single_result(include_in_tr=True, valid_pid_stage1=valid_pid_stage1, valid_pid_stage2=valid_pid_stage2, valid_vars=var_used_4_current_hyp):  # TODO: fix warning
    """
    for pid n, train the algorithm on all training and test data of the other n - 1 patients.
    if include_in_tr = True, the training data of patient n is also used for training, otherwise
    all of n's train and test data are used for testing.
    results are given separately for each and every pid
    """
    res_y_ypred_tot = pd.DataFrame(columns=['ytest', 'pred'])
    acc_measure_df_tot = pd.DataFrame()
    sogg = valid_pid_stage1 + valid_pid_stage2
    for num in sogg:
        group = [x for x in sogg if x != str(num)]
        # num = left-out subject
        xtr = pd.DataFrame()
        ytr = pd.Series(dtype=float)  # an explicit dtype silences the empty-Series deprecation warning
        for i in group:
            if i in valid_pid_stage1:
                data = all_df_train_stage1.get(i)
                xtrain, ytrain = processing(data, valid_vars)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
                data = all_df_test_stage1.get(i)
                xtrain, ytrain = processing(data, valid_vars)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
            else:
                data = all_df_train_stage2.get(i)
                xtrain, ytrain = processing(data, valid_vars)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
                data = all_df_test_stage2.get(i)
                xtrain, ytrain = processing(data, valid_vars)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
        if include_in_tr == False:
            if num in valid_pid_stage1:
                xte = pd.DataFrame()
                yte = pd.Series(dtype=float)
                data = all_df_train_stage1.get(num)
                xtrain, ytrain = processing(data, valid_vars)
                xte = xte.append(xtrain)
                yte = yte.append(ytrain, ignore_index=True)
                data = all_df_test_stage1.get(num)
                xtrain, ytrain = processing(data, valid_vars)
                xte = xte.append(xtrain)
                yte = yte.append(ytrain, ignore_index=True)
            else:
                xte = pd.DataFrame()
                yte = pd.Series(dtype=float)
                data = all_df_train_stage2.get(num)
                xtrain, ytrain = processing(data, valid_vars)
                xte = xte.append(xtrain)
                yte = yte.append(ytrain, ignore_index=True)
                data = all_df_test_stage2.get(num)
                xtrain, ytrain = processing(data, valid_vars)
                xte = xte.append(xtrain)
                yte = yte.append(ytrain, ignore_index=True)
        else:
            if num in valid_pid_stage1:
                data = all_df_train_stage1.get(num)
                xtrain, ytrain = processing(data, valid_vars)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
                data = all_df_test_stage1.get(num)
                xte, yte = processing(data, valid_vars)
            else:
                data = all_df_train_stage2.get(num)
                xtrain, ytrain = processing(data, valid_vars)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
                data = all_df_test_stage2.get(num)
                xte, yte = processing(data, valid_vars)
        res_y_ypred, acc_measure_df = modelling(xtr, xte, ytr, yte, str(num))
        res_y_ypred_tot = res_y_ypred_tot.append(res_y_ypred)
        acc_measure_df_tot = pd.concat([acc_measure_df_tot, acc_measure_df], axis=0)
    # acc_measure_df_tot.columns = sogg
    acc_measure_df_tot.index = sogg
    # for some reason 0s are imputed instead of nans, correction:
    # acc_measure_df_tot = acc_measure_df_tot.fillna(0)
    return res_y_ypred_tot, acc_measure_df_tot
# %%
# experiment setup
risultati = []
for i in [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]:
    # processing is redefined on every iteration so that the current lag i is picked up by the recursive call below
    def processing(data, vars):
        """
        vars = list of vars
        """
        data = data[vars]
        data_resampled = resample(data, 5)
        data_filled = fill_na(data_resampled)
        data_interpolated = data_interpolation(data_filled, method="polynomial", order=1, limit=4)
        data_feature_added = feature_eng(data_interpolated, mealzone=True)
        data_manipulated = additional_manipulation(data_feature_added)
        data_sampled = create_samples_V2(data_manipulated, number_lags=i,
                                         colonne_da_laggare=['basal', 'CHO', 'insulin', 'basis_heart_rate',
                                                             'basis_gsr', 'basis_skin_temperature', 'basis_air_temperature',
                                                             'basis_steps', 'mealZone0-16', 'hour', 'minute'],
                                         colonna_Y='glucose', pred_horizon=0)
        data_sampled.drop('glucose_t', axis=1, inplace=True)
        data_sampled.dropna(inplace=True)
        x, y = extract_data(data_sampled, 0)
        x = final_x_manipulation(x)
        return x, y
    res_y_ypred_tot2, acc_measure_df_tot2 = recursive_conglobate_get_single_result(include_in_tr=False, valid_pid_stage1=valid_pid_stage1, valid_pid_stage2=valid_pid_stage2)
    risultati.append(acc_measure_df_tot2['rmse'].mean())
# %%
# %% [markdown]
# experiment: given the variables listed below, see what the influence of lagging all variables by n periods is.
# vars: 'basal_t', 'CHO_t', 'insulin_t', 'basis_heart_rate_t', 'basis_gsr_t',
# 'basis_skin_temperature_t', 'basis_air_temperature_t', 'basis_steps_t',
# 'mealZone0-16_t', 'hour_t', 'minute_t'
# error measure: acc_measure_df_tot2['rmse'].mean() (include_in_tr = False)
# goal: can I get ~40 rmse?
# n = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
# err = [62.073,62.033, 61.970,61.894,61.821,61.751,61.697,61.643,61.610,61.592,61.599,61.622,61.652,61.692,61.728,61.769,61.790,61.90588386098091,61.942797708253664,62.0236383456456,62.09158939797729,62.16508535646403,62.23932065366535,62.29116025720278,62.31366181763229,62.3977499808425,62.45356290217608,62.53219495620968,62.56722356450242,62.60939553293209,62.64062756203514]
# result: for this particular set of variables, the best lag is around 10, but the improvement is small
# %%
# experiment setup
def processing(data, vars):
    """
    vars = list of vars
    """
    data = data[vars]
    data_resampled = resample(data, 5)
    data_filled = fill_na(data_resampled)
    data_interpolated = data_interpolation(data_filled, method="polynomial", order=1, limit=4)
    data_feature_added = feature_eng(data_interpolated, mealzone=True)
    data_manipulated = additional_manipulation(data_feature_added)
    data_sampled = create_samples_V2(data_manipulated.loc[:, ('basis_gsr', 'glucose', 'datetime')], number_lags=0, colonne_da_laggare=[], colonna_Y='glucose', pred_horizon=0)
    data_sampled.drop('glucose_t', axis=1, inplace=True)
    data_sampled.dropna(inplace=True)
    x, y = extract_data(data_sampled, 0)
    x = final_x_manipulation(x)
    return x, y

res_y_ypred_tot2, acc_measure_df_tot2 = recursive_conglobate_get_single_result(include_in_tr=False, valid_pid_stage1=valid_pid_stage1, valid_pid_stage2=valid_pid_stage2)
acc_measure_df_tot2['rmse'].mean()
# %% [markdown]
# experiment: given the variables listed below, see what the best subset is.
# the focus here is on logic-driven hypotheses
# vars: 'basal', 'CHO', 'insulin', 'basis_heart_rate', 'basis_gsr',
# 'basis_skin_temperature', 'basis_air_temperature', 'basis_steps',
# 'mealZone0-16_t', 'hour', 'minute'
# error measure: acc_measure_df_tot2['rmse'].mean() (include_in_tr = False)
# goal: can I get ~40 rmse?
# used = ["hour minute", "basis_heart_rate", "basis_gsr"]
# result = [61.716, 62.180, 61.765]
# %%
# from http://www.science.smith.edu/~jcrouser/SDS293/labs/lab8-py.html
# maybe there is other cool stuff there
import itertools
import time

def processing(data, vars, feature_set):
    """
    vars = list of vars of the current hypothesis
    """
    data = data[vars]
    data_resampled = resample(data, 5)
    data_filled = fill_na(data_resampled)
    data_interpolated = data_interpolation(data_filled, method="polynomial", order=1, limit=4)
    data_feature_added = feature_eng(data_interpolated, mealzone=True)
    data_manipulated = additional_manipulation(data_feature_added)
    data_sampled = create_samples_V2(data_manipulated.loc[:, ('datetime', 'glucose') + tuple(feature_set)], number_lags=0, colonne_da_laggare=[], colonna_Y='glucose', pred_horizon=0)
    data_sampled.drop('glucose_t', axis=1, inplace=True)
    data_sampled.dropna(inplace=True)
    x, y = extract_data(data_sampled, 0)
    x = final_x_manipulation(x)
    return x, y

def recursive_conglobate_get_single_result_subset_selection(feature_set, include_in_tr=True, valid_pid_stage1=valid_pid_stage1, valid_pid_stage2=valid_pid_stage2, valid_vars=var_used_4_current_hyp):  # TODO: fix warning
    """
    for pid n, train the algorithm on all training and test data of the other n - 1 patients.
    if include_in_tr = True, the training data of patient n is also used for training, otherwise
    all of n's train and test data are used for testing.
    results are given separately for each and every pid
    """
    res_y_ypred_tot = pd.DataFrame(columns=['ytest', 'pred'])
    acc_measure_df_tot = pd.DataFrame()
    sogg = valid_pid_stage1 + valid_pid_stage2
    for num in sogg:
        group = [x for x in sogg if x != str(num)]
        # num = left-out subject
        xtr = pd.DataFrame()
        ytr = pd.Series(dtype=float)
        for i in group:
            if i in valid_pid_stage1:
                data = all_df_train_stage1.get(i)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
                data = all_df_test_stage1.get(i)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
            else:
                data = all_df_train_stage2.get(i)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
                data = all_df_test_stage2.get(i)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
        if include_in_tr == False:
            if num in valid_pid_stage1:
                xte = pd.DataFrame()
                yte = pd.Series(dtype=float)
                data = all_df_train_stage1.get(num)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xte = xte.append(xtrain)
                yte = yte.append(ytrain, ignore_index=True)
                data = all_df_test_stage1.get(num)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xte = xte.append(xtrain)
                yte = yte.append(ytrain, ignore_index=True)
            else:
                xte = pd.DataFrame()
                yte = pd.Series(dtype=float)
                data = all_df_train_stage2.get(num)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xte = xte.append(xtrain)
                yte = yte.append(ytrain, ignore_index=True)
                data = all_df_test_stage2.get(num)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xte = xte.append(xtrain)
                yte = yte.append(ytrain, ignore_index=True)
        else:
            if num in valid_pid_stage1:
                data = all_df_train_stage1.get(num)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
                data = all_df_test_stage1.get(num)
                xte, yte = processing(data, valid_vars, feature_set)
            else:
                data = all_df_train_stage2.get(num)
                xtrain, ytrain = processing(data, valid_vars, feature_set)
                xtr = xtr.append(xtrain)
                ytr = ytr.append(ytrain, ignore_index=True)
                data = all_df_test_stage2.get(num)
                xte, yte = processing(data, valid_vars, feature_set)
        res_y_ypred, acc_measure_df = modelling(xtr, xte, ytr, yte, str(num))
        res_y_ypred_tot = res_y_ypred_tot.append(res_y_ypred)
        acc_measure_df_tot = pd.concat([acc_measure_df_tot, acc_measure_df], axis=0)
    # acc_measure_df_tot.columns = sogg
    acc_measure_df_tot.index = sogg
    # for some reason 0s are imputed instead of nans, correction:
    # acc_measure_df_tot = acc_measure_df_tot.fillna(0)
    return res_y_ypred_tot, acc_measure_df_tot

def processSubset(feature_set):
    __, acc_measure_df_tot2 = recursive_conglobate_get_single_result_subset_selection(feature_set=feature_set, include_in_tr=False, valid_pid_stage1=valid_pid_stage1, valid_pid_stage2=valid_pid_stage2)
    return {"vars": feature_set, "error": acc_measure_df_tot2['rmse'].mean()}

# columns for search = data_manipulated.columns minus 'glucose' and 'datetime'
def getBest(k):
    tic = time.time()
    results = []
    columns_to_search = ['basal', 'CHO', 'insulin', 'basis_heart_rate',
                         'basis_gsr', 'basis_skin_temperature', 'basis_air_temperature',
                         'basis_steps', 'mealZone0-16', 'hour', 'minute']
    for combo in itertools.combinations(columns_to_search, k):
        results.append(processSubset(combo))
    # Wrap everything up in a nice dataframe
    models = pd.DataFrame(results)
    # Choose the model with the lowest error (mean rmse)
    best_model = models.loc[models['error'].argmin()]
    toc = time.time()
    print("Processed", models.shape[0], "models on", k, "predictors in", (toc - tic), "seconds.")
    # Return the best model, along with some other useful information about the model
    return best_model, models

best, df_result = getBest(5)
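# %% [markdown]
# A note on cost: getBest(5) evaluates C(11, 5) = 462 subsets, and every subset reruns the full
# leave-one-patient-out loop above (one linear model per valid patient), so roughly 462 * 12 fits for
# k = 5 alone. An exhaustive search over every subset size would cover 2**11 - 1 = 2047 subsets.
# %%
# e.g., counting the exhaustive search without running it:
# sum(1 for k in range(1, 12) for _ in itertools.combinations(range(11), k))  # == 2047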