model_selection.py
"""Tune candidate classifiers with grid-search cross-validation, log the
selection metrics, and save the best estimator and its tuned parameters."""
import sys
import os
import errno
import time
import pathlib as pl
import pandas as pd
import yaml
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import joblib


def mkdir_p(path):
    """Create a directory tree, ignoring the error if it already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise


def cross_validate_model(X, y, model, model_grid, scorer, ksplits=10, seed=42, **kwargs):
    kfold = KFold(n_splits=ksplits, random_state=seed, shuffle=True)
    grid = GridSearchCV(estimator=model, param_grid=model_grid, scoring=scorer,
                        cv=kfold, n_jobs=-1, refit='accuracy')
    grid_result = grid.fit(X, y)
    print("Best %s: %f using %s" % (grid_result.refit, grid_result.best_score_, grid_result.best_params_))
    best = grid_result.best_index_  # candidate picked by the refit scorer
    df_scores = pd.DataFrame([], columns=['metric', 'mean_test', 'std_test', 'params', 'rank_test'])
    for mtr in scorer:
        print(mtr.upper())
        means = grid_result.cv_results_['mean_test_' + mtr]
        stds = grid_result.cv_results_['std_test_' + mtr]
        params = grid_result.cv_results_['params']
        ranks = grid_result.cv_results_['rank_test_' + mtr]
        for mean, stdev, param, rank in zip(means, stds, params, ranks):
            print("#%d %f (%f) with: %r" % (rank, mean, stdev, param))
        # Record each metric achieved by the top performer according to the
        # refit (default/decisive) scorer.
        row = pd.DataFrame([[mtr, means[best], stds[best], params[best], ranks[best]]],
                           columns=df_scores.columns)
        df_scores = pd.concat([df_scores, row], ignore_index=True)
    return grid_result, df_scores


#TODO
def tune_models(models, grids):
    for model, grid in zip(models, grids):
        start_cv = time.time()
        grid_result, scores = cross_validate_model(X=X_train, y=y_train, model=model, model_grid=grid,
                                                   scorer=evaluation_metric)
        end_cv = time.time()
        joblib.dump(grid_result.best_estimator_,
                    os.path.join(models_folder, type(model).__name__ + '_model.joblib'))
        # generate dataframe to be appended to selection_metrics.csv
        metrics = pd.DataFrame([[scores.metric[i], scores.mean_test[i], int(end_cv * 1000), scores.rank_test[i]]
                                for i in range(len(scores))],
                               columns=['Name', 'Value', 'Timestamp', 'Step'])
        metrics_csv = os.path.join(metrics_folder, 'selection_metrics.csv')
        if not os.path.exists(metrics_csv):
            metrics.to_csv(metrics_csv, index=False)
        else:
            metrics.to_csv(metrics_csv, index=False, header=False, mode='a')
        # save tuned model parameters
        with open(params_path, 'a') as yaml_path:
            yaml.safe_dump({'model': type(model).__name__}, yaml_path)
            yaml.safe_dump(grid_result.best_params_, yaml_path)
    return


#TODO
def select_model():
    return 'knn_model'


if __name__ == "__main__":
    input_path = sys.argv[1]  # joblib-serialized training DataFrame
    #TODO: change output to folder only (also on stage)
    output = pl.Path(sys.argv[2])
    metrics_folder = pl.Path('metrics')
    params_path = os.path.join(output, 'selection_params.yaml')
    models_folder = os.path.join(output, 'models')
    if not os.path.exists(models_folder):
        mkdir_p(models_folder)
    if not os.path.exists(metrics_folder):
        mkdir_p(metrics_folder)
    evaluation_metric = ['accuracy', 'recall', 'precision']  # recall/precision assume binary labels
    train = joblib.load(input_path)
    y_train = train['label'].copy(deep=True)
    X_train = train.drop('label', axis=1).astype(float)
    #TODO: iteratively tune_models() with fixed grids for gridsearch
    # (or random search with hyperopt, see evernote)
    ######################################## models and grids definition ##############################################
    knn = KNeighborsClassifier()
    neighbors = [1, 3, 5, 7, 9, 11, 13]
    knn_grid = dict(n_neighbors=neighbors)
    c_values = [0.1, 0.3, 0.5, 0.7, 0.9, 1.0, 1.3, 1.5, 1.7, 2.0]
    kernel_values = ['linear', 'poly', 'rbf', 'sigmoid']
    svc_grid = dict(C=c_values, kernel=kernel_values)
    svc = SVC()
    # Only the SVC is tuned in this run; the KNN grid is defined but unused.
    clfs = [svc]
    clsf_grids = [svc_grid]
    tune_models(models=clfs, grids=clsf_grids)
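Once the script has run, a selection step can load the saved artifacts back. Below is a minimal consumption sketch, not part of model_selection.py: it assumes a single SVC was tuned and that the script's second argument was a folder named output (both illustrative).

import os
import joblib
import pandas as pd
import yaml

output = 'output'  # hypothetical value of sys.argv[2]

# tune_models() saves the best estimator as '<ClassName>_model.joblib'.
best_svc = joblib.load(os.path.join(output, 'models', 'SVC_model.joblib'))

# selection_params.yaml holds the model name followed by its best grid
# parameters; the appended mappings parse as one document when a single
# model was tuned.
with open(os.path.join(output, 'selection_params.yaml')) as fh:
    best_params = yaml.safe_load(fh)

# selection_metrics.csv accumulates one row per metric per tuning run.
history = pd.read_csv(os.path.join('metrics', 'selection_metrics.csv'))
print(best_params, history, sep='\n')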