Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

main.py 4.0 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
  1. import pandas as pd
  2. import argparse
  3. from sklearn.feature_extraction.text import TfidfVectorizer
  4. from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, precision_score, recall_score, \
  5. f1_score
  6. from sklearn.model_selection import train_test_split
  7. from sklearn.ensemble import RandomForestClassifier
  8. import joblib
  9. import dagshub
# Name of the binary target column derived from the 'Tags' field.
CLASS_LABEL = 'MachineLearning'
# Locations where split() writes and train() reads the train/test CSVs.
train_df_path = 'data/train.csv'
test_df_path = 'data/test.csv'
  13. def feature_engineering(raw_df):
  14. df = raw_df.copy()
  15. df['CreationDate'] = pd.to_datetime(df['CreationDate'])
  16. df['CreationDate_Epoch'] = df['CreationDate'].astype('int64') // 10 ** 9
  17. df['MachineLearning'] = df['Tags'].str.contains('machine-learning').fillna(False)
  18. df = df.drop(columns=['Id', 'Tags'])
  19. df['Title_Len'] = df.Title.str.len()
  20. df['Body_Len'] = df.Body.str.len()
  21. # Drop the correlated features
  22. df = df.drop(columns=['FavoriteCount'])
  23. df['Text'] = df['Title'].fillna('') + ' ' + df['Body'].fillna('')
  24. return df
  25. def fit_tfidf(train_df, test_df):
  26. tfidf = TfidfVectorizer(max_features=25000)
  27. tfidf.fit(train_df['Text'])
  28. train_tfidf = tfidf.transform(train_df['Text'])
  29. test_tfidf = tfidf.transform(test_df['Text'])
  30. return train_tfidf, test_tfidf, tfidf
  31. def fit_model(train_X, train_y, random_state=42):
  32. clf_tfidf = RandomForestClassifier(random_state=random_state, class_weight='balanced', max_depth=50)
  33. clf_tfidf.fit(train_X, train_y)
  34. return clf_tfidf
  35. def eval_model(clf, X, y):
  36. y_proba = clf.predict_proba(X)[:, 1]
  37. y_pred = clf.predict(X)
  38. return {
  39. 'roc_auc': roc_auc_score(y, y_proba),
  40. 'average_precision': average_precision_score(y, y_proba),
  41. 'accuracy': accuracy_score(y, y_pred),
  42. 'precision': precision_score(y, y_pred),
  43. 'recall': recall_score(y, y_pred),
  44. 'f1': f1_score(y, y_pred),
  45. }
  46. def split(random_state=42):
  47. print('Loading data...')
  48. df = pd.read_csv('data/CrossValidated-Questions.csv')
  49. df[CLASS_LABEL] = df['Tags'].str.contains('machine-learning').fillna(False)
  50. train_df, test_df = train_test_split(df, random_state=random_state, stratify=df[CLASS_LABEL])
  51. print('Saving split data...')
  52. train_df.to_csv(train_df_path)
  53. test_df.to_csv(test_df_path)
def train():
    """Train the TF-IDF + random-forest pipeline and log params/metrics.

    Loads the CSVs produced by split(), engineers features, fits the
    vectorizer and the model (both saved under outputs/), and logs
    hyperparameters plus train/test metrics via the dagshub logger.
    """
    print('Loading data...')
    train_df = pd.read_csv(train_df_path)
    test_df = pd.read_csv(test_df_path)
    print('Engineering features...')
    train_df = feature_engineering(train_df)
    test_df = feature_engineering(test_df)
    # The logger context writes hyperparams/metrics files for DagsHub tracking.
    with dagshub.dagshub_logger() as logger:
        print('Fitting TFIDF...')
        # Vocabulary is learned from the train split only; both splits are transformed.
        train_tfidf, test_tfidf, tfidf = fit_tfidf(train_df, test_df)
        print('Saving TFIDF object...')
        # NOTE(review): assumes the outputs/ directory already exists — confirm.
        joblib.dump(tfidf, 'outputs/tfidf.joblib')
        logger.log_hyperparams({'tfidf': tfidf.get_params()})
        print('Training model...')
        train_y = train_df[CLASS_LABEL]
        model = fit_model(train_tfidf, train_y)
        print('Saving trained model...')
        joblib.dump(model, 'outputs/model.joblib')
        logger.log_hyperparams(model_class=type(model).__name__)
        logger.log_hyperparams({'model': model.get_params()})
        print('Evaluating model...')
        # Train-set metrics are optimistic (model evaluated on its own training data).
        train_metrics = eval_model(model, train_tfidf, train_y)
        print('Train metrics:')
        print(train_metrics)
        logger.log_metrics({f'train__{k}': v for k,v in train_metrics.items()})
        test_metrics = eval_model(model, test_tfidf, test_df[CLASS_LABEL])
        print('Test metrics:')
        print(test_metrics)
        logger.log_metrics({f'test__{k}': v for k,v in test_metrics.items()})
  83. if __name__ == '__main__':
  84. parser = argparse.ArgumentParser()
  85. subparsers = parser.add_subparsers(title='Split or Train step:', dest='step')
  86. subparsers.required = True
  87. split_parser = subparsers.add_parser('split')
  88. split_parser.set_defaults(func=split)
  89. train_parser = subparsers.add_parser('train')
  90. train_parser.set_defaults(func=train)
  91. parser.parse_args().func()
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...