#549 Feature/infra 1481 call integration tests

Merged
Ghost merged 1 commit into Deci-AI:master from deci-ai:feature/infra-1481_call_integration_tests
from super_gradients.training.utils.callbacks import PhaseCallback, Phase, PhaseContext
from typing import Optional
import torch
import numpy as np
from super_gradients.common.abstractions.abstract_logger import get_logger

logger = get_logger(__name__)


class EarlyStop(PhaseCallback):
    """
    Callback to monitor a metric and stop training when it stops improving.
    Inspired by pytorch_lightning.callbacks.early_stopping and tf.keras.callbacks.EarlyStopping
    """

    mode_dict = {"min": torch.lt, "max": torch.gt}
    supported_phases = (Phase.VALIDATION_EPOCH_END, Phase.TRAIN_EPOCH_END)

    def __init__(self,
                 phase: Phase,
                 monitor: str,
                 mode: str = "min",
                 min_delta: float = 0.0,
                 patience: int = 3,
                 check_finite: bool = True,
                 threshold: Optional[float] = None,
                 verbose: bool = False,
                 strict: bool = True
                 ):
        """
        :param phase: Callback phase event.
        :param monitor: name of the metric to be monitored.
        :param mode: one of 'min', 'max'. In 'min' mode, training will stop when the quantity
            monitored has stopped decreasing and in 'max' mode it will stop when the quantity
            monitored has stopped increasing.
        :param min_delta: minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute
            change of less than `min_delta` will count as no improvement.
        :param patience: number of checks with no improvement after which training will be stopped.
            One check happens after every phase event.
        :param check_finite: When set ``True``, stops training when the monitor becomes NaN or infinite.
        :param threshold: Stop training immediately once the monitored quantity reaches this threshold. For mode 'min'
            stops training when below the threshold, for mode 'max' stops training when above the threshold.
        :param verbose: If `True`, print logs.
        :param strict: whether to crash the training if `monitor` is not found in the metrics.
        """
        super(EarlyStop, self).__init__(phase)

        if phase not in self.supported_phases:
            raise ValueError(f"EarlyStop doesn't support phase: {phase}, "
                             f"expected one of: {', '.join([str(x) for x in self.supported_phases])}")
        self.phase = phase
        self.monitor_key = monitor
        self.min_delta = min_delta
        self.patience = patience
        self.mode = mode
        self.check_finite = check_finite
        self.threshold = threshold
        self.verbose = verbose
        self.strict = strict

        self.wait_count = 0

        if self.mode not in self.mode_dict:
            raise Exception(f"`mode` can be {', '.join(self.mode_dict.keys())}, got {self.mode}")
        self.monitor_op = self.mode_dict[self.mode]

        # In 'min' mode an improvement means a decrease, so min_delta is applied with a negative sign.
        self.min_delta *= 1 if self.monitor_op == torch.gt else -1

        torch_inf = torch.tensor(np.Inf)
        self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf

    def _get_metric_value(self, metrics_dict):
        if self.monitor_key not in metrics_dict.keys():
            msg = f"Can't find EarlyStop monitor {self.monitor_key} in metrics_dict: {metrics_dict.keys()}"
            exception_cls = RuntimeError if self.strict else MissingMonitorKeyException
            raise exception_cls(msg)
        return metrics_dict[self.monitor_key]

    def _check_for_early_stop(self, current: torch.Tensor):
        should_stop = False

        # check if current value is NaN or inf
        if self.check_finite and not torch.isfinite(current):
            should_stop = True
            reason = (
                f"Monitored metric {self.monitor_key} = {current} is not finite."
                f" Previous best value was {self.best_score:.3f}. Signaling Trainer to stop."
            )

        # check if current value reached threshold value
        elif self.threshold is not None and self.monitor_op(current, self.threshold):
            should_stop = True
            reason = (
                "Stopping threshold reached:"
                f" {self.monitor_key} = {current} {self.monitor_op} {self.threshold}."
                " Signaling Trainer to stop."
            )

        # check if current is an improvement of the monitor_key metric.
        elif self.monitor_op(current - self.min_delta, self.best_score.to(current.device)):
            should_stop = False
            if torch.isfinite(self.best_score):
                reason = (
                    f"Metric {self.monitor_key} improved by {abs(self.best_score - current):.3f} >="
                    f" min_delta = {abs(self.min_delta)}. New best score: {current:.3f}"
                )
            else:
                reason = f"Metric {self.monitor_key} improved. New best score: {current:.3f}"
            self.best_score = current
            self.wait_count = 0

        # no improvement in the monitor_key metric, check if wait_count exceeds patience.
        else:
            self.wait_count += 1
            reason = f"Monitored metric {self.monitor_key} did not improve in the last {self.wait_count} records."
            if self.wait_count >= self.patience:
                should_stop = True
                reason += f" Best score: {self.best_score:.3f}. Signaling Trainer to stop."

        return reason, should_stop

    def __call__(self, context: PhaseContext):
        try:
            current = self._get_metric_value(context.metrics_dict)
        except MissingMonitorKeyException as e:
            logger.warning(e)
            return

        if not isinstance(current, torch.Tensor):
            current = torch.tensor(current)

        reason, self.should_stop = self._check_for_early_stop(current)

        # log reason message, and signal early stop if should_stop=True.
        if self.should_stop:
            self._signal_early_stop(context, reason)
        elif self.verbose:
            logger.info(reason)

    def _signal_early_stop(self, context: PhaseContext, reason: str):
        logger.info(reason)
        context.update_context(stop_training=True)


class MissingMonitorKeyException(Exception):
    """
    Exception raised for a missing monitor key in metrics_dict.
    """
    pass
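For context, a callback like this is meant to be registered on the trainer and invoked at its phase event. The snippet below is a minimal usage sketch, not part of this PR: the metric names ("Loss", "Accuracy") and the `phase_callbacks` training-params entry are assumptions for illustration, and the surrounding Trainer/dataloader setup is omitted.

    from super_gradients.training.utils.callbacks import Phase

    # Stop when the validation loss has not improved by at least 0.01 for 3 consecutive checks.
    early_stop_loss = EarlyStop(
        phase=Phase.VALIDATION_EPOCH_END,
        monitor="Loss",        # assumed metric key; must match a key in context.metrics_dict
        mode="min",
        min_delta=0.01,
        patience=3,
        verbose=True,
    )

    # Stop immediately once validation accuracy crosses 0.98.
    early_stop_acc = EarlyStop(
        phase=Phase.VALIDATION_EPOCH_END,
        monitor="Accuracy",    # assumed metric key
        mode="max",
        threshold=0.98,
    )

    # Assumed registration point: a `phase_callbacks` list in the training params,
    # as with other SuperGradients phase callbacks.
    training_params = {
        # ... other training hyperparameters ...
        "phase_callbacks": [early_stop_loss, early_stop_acc],
    }

With this wiring, each callback runs after every validation epoch; as soon as either one calls `context.update_context(stop_training=True)`, the trainer is signaled to stop.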