Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

eval.py 4.8 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
# silence pytorch lightning bolts UserWarning about missing gym package (as of v0.3.0)
import warnings
from pathlib import Path
from typing import List, Optional

import dotenv
import hydra
from deadtrees.utils import utils
from deadtrees.utils.env import get_env
from omegaconf import DictConfig
from pytorch_lightning import (
    Callback,
    LightningDataModule,
    LightningModule,
    seed_everything,
    Trainer,
)
from pytorch_lightning.loggers import LightningLoggerBase

# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)

# suppress all UserWarnings module-wide (see note at top of file)
warnings.simplefilter(action="ignore", category=UserWarning)

# module-level logger obtained via the project's logging helper
log = utils.get_logger(__name__)
  23. def evaluate(config: DictConfig) -> Optional[float]:
  24. """Contains training pipeline.
  25. Instantiates all PyTorch Lightning objects from config.
  26. Args:
  27. config (DictConfig): Configuration composed by Hydra.
  28. Returns:
  29. Optional[float]: Metric score for hyperparameter optimization.
  30. """
  31. # Set seed for random number generators in pytorch, numpy and python.random
  32. if config.get("seed"):
  33. seed_everything(config.seed, workers=True)
  34. # Init Lightning datamodule
  35. ddir = Path(get_env("TRAIN_DATASET_PATH"))
  36. subfolders = ["train", "val", "test"]
  37. if all([(ddir / d).is_dir() for d in subfolders]):
  38. # dataset/train, dataset/val, dataset/test layout
  39. log.info(
  40. f"Instantiating datamodule <{config.datamodule._target_}> with train, val, test folder layout"
  41. )
  42. datamodule: LightningDataModule = hydra.utils.instantiate(
  43. config.datamodule,
  44. data_dir=[str(ddir / d) for d in subfolders],
  45. pattern=config.datamodule.pattern,
  46. pattern_extra=config.datamodule.get("pattern_extra", None),
  47. batch_size_extra=config.datamodule.get("batch_size_extra", None),
  48. )
  49. else:
  50. log.info(
  51. f"Instantiating datamodule <{config.datamodule._target_}> with single folder layout"
  52. )
  53. datamodule: LightningDataModule = hydra.utils.instantiate(
  54. config.datamodule,
  55. data_dir=get_env("TRAIN_DATASET_PATH"),
  56. pattern=config.datamodule.pattern,
  57. pattern_extra=config.datamodule.get("pattern_extra", None),
  58. batch_size_extra=config.datamodule.get("batch_size_extra", None),
  59. )
  60. datamodule.setup(
  61. in_channels=config.model.network.in_channels,
  62. classes=len(config.model.network.classes),
  63. )
  64. # Init Lightning model
  65. log.info(f"Instantiating model <{config.model._target_}>")
  66. model: LightningModule = hydra.utils.instantiate(config.model)
  67. # Init Lightning callbacks
  68. callbacks: List[Callback] = []
  69. if "callbacks" in config:
  70. for _, cb_conf in config.callbacks.items():
  71. if "_target_" in cb_conf:
  72. log.info(f"Instantiating callback <{cb_conf._target_}>")
  73. callbacks.append(hydra.utils.instantiate(cb_conf))
  74. # Init Lightning loggers
  75. logger: List[LightningLoggerBase] = []
  76. if "logger" in config:
  77. for _, lg_conf in config.logger.items():
  78. if "_target_" in lg_conf:
  79. log.info(f"Instantiating logger <{lg_conf._target_}>")
  80. logger.append(hydra.utils.instantiate(lg_conf))
  81. # Init Lightning trainer
  82. log.info(f"Instantiating trainer <{config.trainer._target_}>")
  83. trainer: Trainer = hydra.utils.instantiate(
  84. config.trainer, callbacks=callbacks, logger=logger, _convert_="partial"
  85. )
  86. # Send some parameters from config to all lightning loggers
  87. log.info("Logging hyperparameters!")
  88. utils.log_hyperparameters(
  89. config=config,
  90. model=model,
  91. datamodule=datamodule,
  92. trainer=trainer,
  93. callbacks=callbacks,
  94. logger=logger,
  95. )
  96. # Train the model
  97. log.info("Starting testing!")
  98. log.info(f"{Path.cwd()}")
  99. trainer.test(
  100. model=model, datamodule=datamodule, ckpt_path=config.bestmodel, verbose=True
  101. )
  102. @hydra.main(config_path="configs/", config_name="config.yaml")
  103. def main(config: DictConfig):
  104. # Imports can be nested inside @hydra.main to optimize tab completion
  105. # https://github.com/facebookresearch/hydra/issues/934
  106. from deadtrees.utils import utils
  107. # A couple of optional utilities:
  108. # - disabling python warnings
  109. # - forcing debug-friendly configuration
  110. # - verifying experiment name is set when running in experiment mode
  111. # You can safely get rid of this line if you don't want those
  112. utils.extras(config)
  113. # Pretty print config using Rich library
  114. if config.get("print_config"):
  115. utils.print_config(config, resolve=True)
  116. # Train model
  117. return evaluate(config)
  118. if __name__ == "__main__":
  119. main()
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...