Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

01_fit_dnn_vary_hyperparams.py 2.8 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
  1. import os
  2. from os.path import join as oj
  3. import sys
  4. sys.path.append('../src')
  5. import numpy as np
  6. import torch
  7. import scipy
  8. from matplotlib import pyplot as plt
  9. from sklearn import metrics
  10. import data
  11. from config import *
  12. from tqdm import tqdm
  13. import pickle as pkl
  14. import train_reg
  15. from copy import deepcopy
  16. import config
  17. import models
  18. import pandas as pd
  19. import features
  20. import outcomes
  21. import neural_networks
  22. from sklearn.model_selection import KFold
  23. from torch import nn, optim
  24. from torch.nn import functional as F
  25. from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
  26. from sklearn.linear_model import LinearRegression, RidgeCV
  27. from sklearn.svm import SVR
  28. from collections import defaultdict
  29. import pickle as pkl
  30. if __name__ == '__main__':
  31. print("loading data")
  32. dsets = ['clath_aux+gak_a7d2', 'clath_aux+gak', 'clath_aux+gak_a7d2_new', 'clath_aux+gak_new', 'clath_gak', 'clath_aux_dynamin']
  33. splits = ['train', 'test']
  34. #feat_names = ['X_same_length_normalized'] + data.select_final_feats(data.get_feature_names(df))
  35. #['mean_total_displacement', 'mean_square_displacement', 'lifetime']
  36. meta = ['cell_num', 'Y_sig_mean', 'Y_sig_mean_normalized', 'y_consec_thresh']
  37. for length in [40, 100, 200]:
  38. for padding in ['front', 'end']:
  39. dfs, feat_names = data.load_dfs_for_lstm(dsets=dsets,
  40. splits=splits,
  41. meta=meta,
  42. length=length,
  43. padding=padding)
  44. df_full = pd.concat([dfs[(k, s)]
  45. for (k, s) in dfs
  46. if s == 'train'])[feat_names + meta]
  47. np.random.seed(42)
  48. checkpoint_fname = f'../models/dnn_full_long_normalized_across_track_1_feat_dynamin_{length}_{padding}_tuning.pkl'
  49. valid_cells = ['A7D2/1',
  50. 'CLTA-TagRFP EGFP-Aux1 EGFP-GAK F6/1',
  51. 'CLTA-TagRFP EGFP-GAK A8/1',
  52. 'EGFP-GAK F6/1',
  53. '488-1.5mW 561-1.5mW 647-1.5mW Exp100ms Int1.5s_4_Pos0/1_1.5s',
  54. '488-1.5mW 561-1.5mW 647-1.5mW Exp100ms Int1.5s_4_Pos1/1_1.5s',
  55. '488-1.5mW 561-1.5mW 647-1.5mW Exp100ms Int1.5s_4_Pos2/1_1.5s']
  56. valid = df_full['cell_num'].isin(valid_cells)
  57. df_full_train = df_full[~valid]
  58. dnn = neural_networks.neural_net_sklearn(D_in=length, H=20, p=0, arch='lstm', epochs=200)
  59. dnn.fit(df_full_train[feat_names[:1]], df_full_train['Y_sig_mean_normalized'].values, verbose=True, checkpoint_fname=checkpoint_fname)
  60. pkl.dump({'model_state_dict': dnn.model.state_dict()}, open(checkpoint_fname, 'wb'))
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...