Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

validate_bert.py 2.8 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
  1. import os
  2. import sys
  3. import json
  4. import yaml
  5. import torch
  6. import importlib
  7. import numpy as np
  8. import pandas as pd
  9. from torch import nn
  10. from pathlib import Path
  11. from sklearn.model_selection import train_test_split
  12. from dotenv import load_dotenv
  13. load_dotenv('envs/.env')
  14. with open('params.yaml', 'r') as f:
  15. PARAMS = yaml.safe_load(f)
  16. def start_validating(bert_model, pretrained_model, method='basic'):
  17. df = pd.read_csv('data/all.csv')
  18. train_df, valid_df = train_test_split(df, test_size=1. / PARAMS['validate']['kfold'], random_state=PARAMS['seed'])
  19. print(f"Train valid split")
  20. try:
  21. model_module = importlib.import_module(f'model.{bert_model}.{method}')
  22. model = model_module.Model(
  23. **PARAMS[bert_model], **PARAMS[bert_model][method],
  24. pretrained_model=pretrained_model
  25. )
  26. except Exception as e:
  27. raise e
  28. if torch.cuda.is_available():
  29. device = torch.device('cuda', PARAMS.get('gpu', 0))
  30. else:
  31. device = torch.device('cpu')
  32. model.to(device)
  33. print(model)
  34. try:
  35. trainer = importlib.import_module(f'training.{bert_model}')\
  36. .Trainer(model, pretrained_model=pretrained_model, mode='validate')
  37. except Exception as e:
  38. raise e
  39. try:
  40. dataloader_module = importlib.import_module(f'data_loader.{bert_model}_dataloaders')
  41. except Exception as e:
  42. raise e
  43. train_dataloader = dataloader_module.DataFrameDataLoader(
  44. train_df, pretrained_model=pretrained_model,
  45. do_lower_case=PARAMS[bert_model]['do_lower_case'],
  46. batch_size=PARAMS['validate']['batch_size'],
  47. shuffle=PARAMS['validate']['shuffle'], max_len=PARAMS[bert_model]['max_len']
  48. )
  49. valid_dataloader = dataloader_module.DataFrameDataLoader(
  50. valid_df, pretrained_model=pretrained_model,
  51. do_lower_case=PARAMS[bert_model]['do_lower_case'],
  52. batch_size=PARAMS['validate']['batch_size'],
  53. shuffle=PARAMS['validate']['shuffle'], max_len=PARAMS[bert_model]['eval_max_len']
  54. )
  55. trainer.set_dataloader(train_dataloader, valid_dataloader)
  56. results, losses = trainer.validate()
  57. columns = list(losses[0].keys())
  58. losses_df = pd.DataFrame(losses, columns=columns)
  59. return results, losses_df
  60. if __name__ == '__main__':
  61. bert_model, pretrained_model, method = sys.argv[1], sys.argv[2], sys.argv[3]
  62. results, losses_df = start_validating(bert_model, pretrained_model, method)
  63. results_path = Path(
  64. os.getenv('OUTPUT_PATH'),
  65. f'{bert_model}-{pretrained_model}-{method}_validate_{os.getenv("RESULTS_PATH")}'
  66. )
  67. with open(results_path, 'w') as f:
  68. json.dump(results, f)
  69. plots_path = Path(
  70. os.getenv('OUTPUT_PATH'),
  71. f'{bert_model}-{pretrained_model}-{method}_validate_{os.getenv("PLOTS_PATH")}'
  72. )
  73. losses_df.to_csv(plots_path, index=False)
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...