Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

validate.py 2.7 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
  1. import os
  2. import sys
  3. import json
  4. import yaml
  5. import torch
  6. import importlib
  7. import numpy as np
  8. import pandas as pd
  9. from pathlib import Path
  10. from sklearn.model_selection import train_test_split
  11. from data_loader.data_loaders import DataFrameDataLoader
  12. from dotenv import load_dotenv
  13. import logging
  14. logging.basicConfig(
  15. level=logging.DEBUG,
  16. format="%(asctime)s [%(levelname)s] %(message)s",
  17. handlers=[
  18. logging.FileHandler("debug.log"),
  19. logging.StreamHandler()
  20. ]
  21. )
  22. load_dotenv('envs/.env')
  23. with open('params.yaml', 'r') as f:
  24. PARAMS = yaml.safe_load(f)
  25. config_path = Path(os.getenv('OUTPUT_PATH'), os.getenv('CONFIG_PATH'))
  26. with open(config_path, 'r') as f:
  27. CONFIG = json.load(f)
  28. def start_validating(method='lstm'):
  29. df = pd.read_csv('data/all.csv')
  30. train_df, valid_df = train_test_split(df, test_size=1. / PARAMS['validate']['kfold'], random_state=PARAMS['seed'])
  31. print(f"Train valid split")
  32. try:
  33. model_module = importlib.import_module(f'model.{method}')
  34. model = model_module.Model(**CONFIG, **PARAMS[method])
  35. except Exception as e:
  36. raise e
  37. if torch.cuda.is_available():
  38. device = torch.device('cuda', PARAMS.get('gpu', 0))
  39. else:
  40. device = torch.device('cpu')
  41. model.to(device)
  42. try:
  43. trainer_module = importlib.import_module(f'training.{method}')
  44. trainer = trainer_module.Trainer(model, mode='validate')
  45. except Exception as e:
  46. raise e
  47. train_dataloader = DataFrameDataLoader(
  48. train_df, batch_size=PARAMS['validate']['batch_size'],
  49. shuffle=PARAMS['validate']['shuffle'], use_bag=PARAMS[method]['use_bag'],
  50. use_eos=PARAMS[method].get('use_eos'), max_len=PARAMS[method].get('max_len')
  51. )
  52. valid_dataloader = DataFrameDataLoader(
  53. valid_df, batch_size=PARAMS['validate']['batch_size'],
  54. use_bag=PARAMS[method]['use_bag'], use_eos=PARAMS[method].get('use_eos'), max_len=PARAMS[method].get('max_len')
  55. )
  56. trainer.set_dataloader(train_dataloader, valid_dataloader)
  57. results, losses = trainer.validate()
  58. columns = list(losses[0].keys())
  59. losses_df = pd.DataFrame(losses, columns=columns)
  60. return results, losses_df
  61. if __name__ == '__main__':
  62. method = sys.argv[1]
  63. try:
  64. results, losses_df = start_validating(method)
  65. except Exception as e:
  66. logging.error(e)
  67. raise e
  68. results_path = Path(os.getenv('OUTPUT_PATH'), f'{method}_validate_{os.getenv("RESULTS_PATH")}')
  69. with open(results_path, 'w') as f:
  70. json.dump(results, f)
  71. plots_path = Path(os.getenv('OUTPUT_PATH'), f'{method}_validate_{os.getenv("PLOTS_PATH")}')
  72. losses_df.to_csv(plots_path, index=False)
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...