evaluate.py

import json
from pathlib import Path

import hydra
import joblib
import pandas as pd
from omegaconf import DictConfig, OmegaConf
from sklearn.metrics import confusion_matrix, f1_score

from stages.visualize import plot_confusion_matrix


def convert_to_labels(indexes, labels):
    """Map integer class indexes to their human-readable label names."""
    return [labels[i] for i in indexes]


def write_confusion_matrix_data(y_true, predicted, labels, filename):
    """Save the (true label, predicted label) pairs to a CSV file."""
    assert len(predicted) == len(y_true)
    predicted_labels = convert_to_labels(predicted, labels)
    true_labels = convert_to_labels(y_true, labels)
    cf = pd.DataFrame(list(zip(true_labels, predicted_labels)),
                      columns=["y_true", "predicted"])
    cf.to_csv(filename, index=False)


@hydra.main(config_path="../configs", config_name="configs")
def evaluate_model(cfg: DictConfig) -> None:
    """Evaluate model.

    Args:
        cfg {DictConfig}: Hydra configuration
    """
    # Print the resolved configuration for traceability
    print(OmegaConf.to_yaml(cfg, resolve=True))

    # logger = get_logger('EVALUATE', log_level=cfg.base.log_level)

    # logger.info('Load model')
    model_path = cfg.model.model_path
    model = joblib.load(model_path)

    # logger.info('Load test dataset')
    test_df = pd.read_csv(cfg.train.features_test_path)

    # logger.info('Evaluate (build report)')
    target_column = cfg.train.target_column
    y_test = test_df.loc[:, target_column].values
    X_test = test_df.drop(target_column, axis=1).values

    prediction = model.predict(X_test)
    f1 = f1_score(y_true=y_test, y_pred=prediction, average='macro')

    labels = ['Normal', 'Suspect', 'Pathological']
    cm = confusion_matrix(y_test, prediction)

    report = {
        'f1': f1,
        'cm': cm,
        'actual': y_test,
        'predicted': prediction
    }

    # logger.info('Save metrics')
    # Save the F1 score to the metrics file
    reports_folder = Path(cfg.train.reports_dir)
    metrics_path = reports_folder / cfg.train.metrics_file
    with open(metrics_path, 'w') as fp:
        json.dump(obj={'f1_score': report['f1']}, fp=fp)
    # logger.info(f'F1 metrics file saved to: {metrics_path}')

    # logger.info('Save confusion matrix')
    # Save confusion_matrix.png
    plt = plot_confusion_matrix(cm=report['cm'],
                                target_names=labels,
                                normalize=False)
    confusion_matrix_png_path = reports_folder / cfg.train.confusion_matrix_image
    plt.savefig(confusion_matrix_png_path)
    # logger.info(f'Confusion matrix saved to: {confusion_matrix_png_path}')

    # Save the raw (true, predicted) label pairs used for the confusion matrix
    confusion_matrix_data_path = reports_folder / cfg.train.confusion_matrix_data
    write_confusion_matrix_data(y_test, prediction, labels=labels,
                                filename=confusion_matrix_data_path)
    # logger.info(f'Confusion matrix data saved to: {confusion_matrix_data_path}')


if __name__ == '__main__':
    evaluate_model()
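
For reference, below is a minimal sketch of the configuration structure this script assumes. Only the key names (cfg.model.model_path and the cfg.train.* entries) come from evaluate_model(); every value is a placeholder assumption, not taken from the repository.

# A hypothetical config built with OmegaConf for illustration only. The key
# names mirror what evaluate.py reads; all values are assumed placeholders.
from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "model": {
        "model_path": "models/model.joblib",                  # assumed path
    },
    "train": {
        "features_test_path": "data/processed/test.csv",      # assumed path
        "target_column": "target",                             # assumed column name
        "reports_dir": "reports",                              # assumed directory
        "metrics_file": "metrics.json",                        # assumed file name
        "confusion_matrix_image": "confusion_matrix.png",      # assumed file name
        "confusion_matrix_data": "confusion_matrix_data.csv",  # assumed file name
    },
})
print(OmegaConf.to_yaml(cfg, resolve=True))

When the script is run through Hydra as declared in the decorator (config_path="../configs", config_name="configs"), the equivalent settings would live in a configs/configs.yaml resolved relative to the location of evaluate.py.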