main.py

import dagshub
import argparse
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
import joblib

# Consts
CLASS_LABEL = 'MachineLearning'
train_df_path = 'data/train.csv.zip'
test_df_path = 'data/test.csv.zip'


def feature_engineering(raw_df):
    # Derive simple features from the raw questions and build the text column used for TF-IDF
    df = raw_df.copy()
    df['CreationDate'] = pd.to_datetime(df['CreationDate'])
    df['CreationDate_Epoch'] = df['CreationDate'].astype('int64') // 10 ** 9
    df = df.drop(columns=['Id', 'Tags'])
    df['Title_Len'] = df.Title.str.len()
    df['Body_Len'] = df.Body.str.len()
    # Drop the correlated features
    df = df.drop(columns=['FavoriteCount'])
    df['Text'] = df['Title'].fillna('') + ' ' + df['Body'].fillna('')
    return df


def fit_tfidf(train_df, test_df):
    # Fit the TF-IDF vectorizer on the training text only, then transform both splits
    tfidf = TfidfVectorizer(max_features=25000)
    tfidf.fit(train_df['Text'])
    train_tfidf = tfidf.transform(train_df['Text'])
    test_tfidf = tfidf.transform(test_df['Text'])
    return train_tfidf, test_tfidf, tfidf


def fit_model(train_X, train_y, random_state=42):
    # Linear classifier trained on the TF-IDF features
    clf_tfidf = SGDClassifier(loss='modified_huber', random_state=random_state)
    clf_tfidf.fit(train_X, train_y)
    return clf_tfidf


def eval_model(clf, X, y):
    # Ranking metrics use predicted probabilities; classification metrics use hard predictions
    y_proba = clf.predict_proba(X)[:, 1]
    y_pred = clf.predict(X)
    return {
        'roc_auc': roc_auc_score(y, y_proba),
        'average_precision': average_precision_score(y, y_proba),
        'accuracy': accuracy_score(y, y_pred),
        'precision': precision_score(y, y_pred),
        'recall': recall_score(y, y_pred),
        'f1': f1_score(y, y_pred),
    }


def split(random_state=42):
    # Split step: label the raw questions and save a stratified train/test split
    print('Loading data...')
    df = pd.read_csv('data/CrossValidated-Questions.csv')
    df[CLASS_LABEL] = df['Tags'].str.contains('machine-learning').fillna(False)
    train_df, test_df = train_test_split(df, random_state=random_state, stratify=df[CLASS_LABEL])

    print('Saving split data...')
    train_df.to_csv(train_df_path)
    test_df.to_csv(test_df_path)


def train():
    # Train step: engineer features, fit TF-IDF and the model, evaluate, and log everything
    print('Loading data...')
    train_df = pd.read_csv(train_df_path)
    test_df = pd.read_csv(test_df_path)

    print('Engineering features...')
    train_df = feature_engineering(train_df)
    test_df = feature_engineering(test_df)

    with dagshub.dagshub_logger() as logger:
        print('Fitting TFIDF...')
        train_tfidf, test_tfidf, tfidf = fit_tfidf(train_df, test_df)

        print('Saving TFIDF object...')
        joblib.dump(tfidf, 'outputs/tfidf.joblib')
        logger.log_hyperparams({'tfidf': tfidf.get_params()})

        print('Training model...')
        train_y = train_df[CLASS_LABEL]
        model = fit_model(train_tfidf, train_y)

        print('Saving trained model...')
        joblib.dump(model, 'outputs/model.joblib')
        logger.log_hyperparams(model_class=type(model).__name__)
        logger.log_hyperparams({'model': model.get_params()})

        print('Evaluating model...')
        train_metrics = eval_model(model, train_tfidf, train_y)
        print('Train metrics:')
        print(train_metrics)
        logger.log_metrics({f'train__{k}': v for k, v in train_metrics.items()})

        test_metrics = eval_model(model, test_tfidf, test_df[CLASS_LABEL])
        print('Test metrics:')
        print(test_metrics)
        logger.log_metrics({f'test__{k}': v for k, v in test_metrics.items()})


if __name__ == '__main__':
    # CLI: choose between the 'split' and 'train' steps
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='Split or Train step:', dest='step')
    subparsers.required = True
    split_parser = subparsers.add_parser('split')
    split_parser.set_defaults(func=split)
    train_parser = subparsers.add_parser('train')
    train_parser.set_defaults(func=train)
    parser.parse_args().func()
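To reproduce the pipeline, run the split step once to create the labeled train/test CSVs and then run the train step. Given the argparse subcommands above (and assuming the script lives at the repo root as main.py, with the data/ and outputs/ directories in place), the invocation is:

    python main.py split
    python main.py train

The train step writes outputs/tfidf.joblib and outputs/model.joblib. Below is a minimal sketch of how those artifacts could be loaded to score a new question; the score_question helper and the example inputs are illustrative additions, not part of main.py. Note that the model is fit only on the TF-IDF features of the concatenated Title and Body text, so that is all that is needed at inference time.

    import joblib

    # Load the artifacts written by the train step (paths taken from main.py above)
    tfidf = joblib.load('outputs/tfidf.joblib')
    model = joblib.load('outputs/model.joblib')


    def score_question(title, body):
        # Predicted probability that a question belongs to the 'MachineLearning' class.
        # Mirrors training: only the concatenated Title + Body text is vectorized.
        text = (title or '') + ' ' + (body or '')
        features = tfidf.transform([text])
        return model.predict_proba(features)[0, 1]


    # Hypothetical example input, just to show the call shape
    print(score_question('How do I tune an SVM?', 'I have a small dataset and a radial basis kernel...'))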