#378 Feature/sg 281 add kd notebook

Merged
Ghost merged 1 commit into Deci-AI:master from deci-ai:feature/SG-281-add_kd_notebook
import os
import sys
import socket
import time
from dataclasses import dataclass
from multiprocessing import Process
from pathlib import Path
from typing import Tuple, Union, Dict, List, Sequence, Callable
import random
import inspect

import torch
from torch.utils.tensorboard import SummaryWriter
from treelib import Tree
from termcolor import colored

from super_gradients.common.abstractions.abstract_logger import get_logger
from super_gradients.training.exceptions.dataset_exceptions import UnsupportedBatchItemsFormat

# TODO: These utils should move to sg_trainer package as internal (private) helper functions

IS_BETTER_COLOR = {True: "green", False: "red"}
IS_GREATER_SYMBOLS = {True: "↗", False: "↘"}

logger = get_logger(__name__)
@dataclass
class MonitoredValue:
    """Store a value and some indicators relative to its past iterations.

    The value can be a metric/loss, and the iteration can be epochs/batch.
    """
    name: str
    greater_is_better: bool
    current: float = None
    previous: float = None
    best: float = None
    change_from_previous: float = None
    change_from_best: float = None

    @property
    def is_better_than_previous(self):
        if self.greater_is_better is None or self.change_from_previous is None:
            return None
        elif self.greater_is_better:
            return self.change_from_previous >= 0
        else:
            return self.change_from_previous < 0

    @property
    def is_best_value(self):
        if self.greater_is_better is None or self.change_from_best is None:
            return None
        elif self.greater_is_better:
            return self.change_from_best >= 0
        else:
            return self.change_from_best < 0
def update_monitored_value(previous_monitored_value: MonitoredValue, new_value: float) -> MonitoredValue:
    """Update the given MonitoredValue object (which could track a loss or a metric) with the new value.

    :param previous_monitored_value: The stats about the value that is monitored throughout epochs.
    :param new_value: The value of the current epoch that will be used to update previous_monitored_value
    :return: A new MonitoredValue holding the updated stats.
    """
    previous_value, previous_best_value = previous_monitored_value.current, previous_monitored_value.best
    name, greater_is_better = previous_monitored_value.name, previous_monitored_value.greater_is_better

    if previous_best_value is None:
        previous_best_value = previous_value
    elif greater_is_better:
        previous_best_value = max(previous_value, previous_best_value)
    else:
        previous_best_value = min(previous_value, previous_best_value)

    if previous_value is None:
        change_from_previous = None
        change_from_best = None
    else:
        change_from_previous = new_value - previous_value
        change_from_best = new_value - previous_best_value

    return MonitoredValue(name=name, current=new_value, previous=previous_value, best=previous_best_value,
                          change_from_previous=change_from_previous, change_from_best=change_from_best,
                          greater_is_better=greater_is_better)
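
# A minimal usage sketch (illustrative only, not part of the original file): tracking a
# validation loss across two epochs, where lower is better.
#
#   loss_stats = MonitoredValue(name="loss", greater_is_better=False)
#   loss_stats = update_monitored_value(loss_stats, new_value=0.52)  # first epoch: no diffs yet
#   loss_stats = update_monitored_value(loss_stats, new_value=0.48)  # current=0.48, previous=0.52, best=0.52
#   assert loss_stats.is_better_than_previous and loss_stats.is_best_value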
def update_monitored_values_dict(monitored_values_dict: Dict[str, MonitoredValue],
                                 new_values_dict: Dict[str, float]) -> Dict[str, MonitoredValue]:
    """Update a dict of MonitoredValue objects (losses and/or metrics) with their new values.

    :param monitored_values_dict: Dict mapping value names to their stats throughout epochs.
    :param new_values_dict: Dict mapping value names to their new (i.e. current epoch) value.
    :return: Updated monitored_values_dict
    """
    for monitored_value_name in monitored_values_dict.keys():
        monitored_values_dict[monitored_value_name] = update_monitored_value(
            new_value=new_values_dict[monitored_value_name],
            previous_monitored_value=monitored_values_dict[monitored_value_name],
        )
    return monitored_values_dict
def display_epoch_summary(epoch: int, n_digits: int,
                          train_monitored_values: Dict[str, MonitoredValue],
                          valid_monitored_values: Dict[str, MonitoredValue]) -> None:
    """Display a summary of the losses/metrics of interest for a given epoch.

    :param epoch: the number of the epoch.
    :param n_digits: number of digits to display on screen for float values
    :param train_monitored_values: mapping of loss/metric names to the stats that will be displayed
    :param valid_monitored_values: mapping of loss/metric names to the stats that will be displayed
    """

    def _format_to_str(val: float) -> str:
        return str(round(val, n_digits))

    def _generate_tree(value_name: str, monitored_value: MonitoredValue) -> Tree:
        """Generate a tree that represents the stats of a given loss/metric."""
        current = _format_to_str(monitored_value.current)
        root_id = str(hash(f"{value_name} = {current}")) + str(random.random())

        tree = Tree()
        tree.create_node(tag=f"{value_name.capitalize()} = {current}", identifier=root_id)

        if monitored_value.previous is not None:
            previous = _format_to_str(monitored_value.previous)
            best = _format_to_str(monitored_value.best)
            change_from_previous = _format_to_str(monitored_value.change_from_previous)
            change_from_best = _format_to_str(monitored_value.change_from_best)

            diff_with_prev_colored = colored(
                text=f"{IS_GREATER_SYMBOLS[monitored_value.change_from_previous > 0]} {change_from_previous}",
                color=IS_BETTER_COLOR[monitored_value.is_better_than_previous]
            )
            diff_with_best_colored = colored(
                text=f"{IS_GREATER_SYMBOLS[monitored_value.change_from_best > 0]} {change_from_best}",
                color=IS_BETTER_COLOR[monitored_value.is_best_value]
            )

            tree.create_node(
                tag=f"Epoch N-1 = {previous:6} ({diff_with_prev_colored:8})",
                identifier=f"0_previous_{root_id}",
                parent=root_id
            )
            tree.create_node(
                tag=f"Best until now = {best:6} ({diff_with_best_colored:8})",
                identifier=f"1_best_{root_id}",
                parent=root_id
            )
        return tree

    train_tree = Tree()
    train_tree.create_node("Training", "Training")
    for name, value in train_monitored_values.items():
        train_tree.paste('Training', new_tree=_generate_tree(name, monitored_value=value))

    valid_tree = Tree()
    valid_tree.create_node("Validation", "Validation")
    for name, value in valid_monitored_values.items():
        valid_tree.paste('Validation', new_tree=_generate_tree(name, monitored_value=value))

    summary_tree = Tree()
    summary_tree.create_node(f"SUMMARY OF EPOCH {epoch}", "Summary")
    summary_tree.paste("Summary", train_tree)
    summary_tree.paste("Summary", valid_tree)
    summary_tree.show()
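
# Roughly what the rendered summary looks like for the loss example above (illustrative;
# actual treelib layout and terminal colors may differ slightly):
#
#   SUMMARY OF EPOCH 2
#   ├── Training
#   │   └── Loss = 0.48
#   │       ├── Epoch N-1 = 0.52   (↘ -0.04)
#   │       └── Best until now = 0.52   (↘ -0.04)
#   └── Validation
#       └── Loss = 0.55
#           ...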
def try_port(port):
    """Helper method for tensorboard port binding: check whether a given port is available.

    :param port: the port to try to bind.
    :return: True if the port could be bound (i.e. is available), False otherwise.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    is_port_available = False
    try:
        sock.bind(("localhost", port))
        is_port_available = True
    except Exception as ex:
        print('Port ' + str(port) + ' is in use: ' + str(ex))
    sock.close()
    return is_port_available
def launch_tensorboard_process(checkpoints_dir_path: str, sleep_postpone: bool = True, port: int = None) -> Tuple[Process, int]:
    """Launch a tensorboard process. The default behavior is to scan the ports 6006-6015 and use the first
    free one, unless a port is defined by the user.

    :param checkpoints_dir_path: path to the checkpoints directory; its parent is used as the tensorboard logdir.
    :param sleep_postpone: whether to sleep for a few seconds to let the tensorboard process start.
    :param port: optional user-defined port.
    :return: tuple of (tensorboard process, port), or (None, -1) if no port could be bound.
    """
    logdir_path = str(Path(checkpoints_dir_path).parent.absolute())
    tb_cmd = 'tensorboard --logdir=' + logdir_path + ' --bind_all'
    if port is not None:
        tb_ports = [port]
    else:
        tb_ports = range(6006, 6016)

    for tb_port in tb_ports:
        if not try_port(tb_port):
            continue
        print('Starting Tensor-Board process on port: ' + str(tb_port))
        tensor_board_process = Process(target=os.system, args=(tb_cmd + ' --port=' + str(tb_port),))
        tensor_board_process.daemon = True
        tensor_board_process.start()

        # LET THE TENSORBOARD PROCESS START
        if sleep_postpone:
            time.sleep(3)
        return tensor_board_process, tb_port

    # RETURNING IRRELEVANT VALUES
    print('Failed to initialize Tensor-Board process on port: ' + ', '.join(map(str, tb_ports)))
    return None, -1
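
# Illustrative call (hypothetical path, for demonstration only):
#
#   tb_process, tb_port = launch_tensorboard_process('/home/user/experiments/my_run/ckpt')
#   if tb_process is not None:
#       print('tensorboard is listening on port {}'.format(tb_port))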
def init_summary_writer(tb_dir, checkpoint_loaded, user_prompt=False):
    """Remove previous tensorboard files from the directory (when training from scratch) and return a
    SummaryWriter for it."""
    # If the training is from scratch, walk through the destination folder and delete existing tensorboard logs
    if not checkpoint_loaded:
        for filename in os.listdir(tb_dir):
            if 'events' in filename:
                if not user_prompt:
                    print('"{}" will not be deleted'.format(filename))
                    continue

                # Verify with the user before deleting old tensorboard files
                while True:
                    user = input('\nOLDER TENSORBOARD FILES EXIST IN EXPERIMENT FOLDER:\n"{}"\n'
                                 'DO YOU WANT TO DELETE THEM? [y/n]'.format(filename))
                    if user == 'y':
                        os.remove('{}/{}'.format(tb_dir, filename))
                        print('DELETED: {}!'.format(filename))
                        break
                    elif user == 'n':
                        print('"{}" will not be deleted'.format(filename))
                        break
                    print('Unknown answer...')

    return SummaryWriter(tb_dir)
def add_log_to_file(filename, results_titles_list, results_values_list, epoch, max_epochs):
    """Add a message to the log file"""
    # Note: opening and closing the file every time is inefficient; it is done for experimental purposes
    with open(filename, 'a') as f:
        f.write('\nEpoch (%d/%d) - ' % (epoch, max_epochs))
        for result_title, result_value in zip(results_titles_list, results_values_list):
            if isinstance(result_value, torch.Tensor):
                result_value = result_value.item()
            f.write(result_title + ': ' + str(result_value) + '\t')
def write_training_results(writer, results_titles_list, results_values_list, epoch):
    """Store the training and validation loss and accuracy for the current epoch in a tensorboard file"""
    for res_key, res_val in zip(results_titles_list, results_values_list):
        # USE ONLY LOWER-CASE LETTERS AND REPLACE SPACES WITH '_' TO AVOID MANY TITLES FOR THE SAME KEY
        corrected_res_key = res_key.lower().replace(' ', '_')
        writer.add_scalar(corrected_res_key, res_val, epoch)
    writer.flush()
def write_hpms(writer, hpmstructs=[], special_conf={}):
    """Store the training and dataset hyper params in the tensorboard file"""
    hpm_string = ""
    for hpm in hpmstructs:
        for key, val in hpm.__dict__.items():
            hpm_string += '{}: {} \n '.format(key, val)
    for key, val in special_conf.items():
        hpm_string += '{}: {} \n '.format(key, val)
    writer.add_text("Hyper_parameters", hpm_string)
    writer.flush()
# TODO: This should probably move into datasets/datasets_utils.py?
def unpack_batch_items(batch_items: Union[tuple, torch.Tensor]):
    """Add support for unpacking batch items in the train/validation loop.

    @param batch_items: (Union[tuple, torch.Tensor]) returned by the data loader, which is expected to be in one of
        the following formats:
            1. torch.Tensor or tuple, s.t inputs = batch_items[0], targets = batch_items[1] and len(batch_items) = 2
            2. tuple: (inputs, targets, additional_batch_items)
        where inputs are fed to the network, targets are their corresponding labels and additional_batch_items is a
        dictionary (format {additional_batch_item_i_name: additional_batch_item_i ...}) which can be accessed through
        the phase context under the attribute additional_batch_item_i_name, using a phase callback.
    @return: inputs, target, additional_batch_items
    """
    additional_batch_items = {}
    if len(batch_items) == 2:
        inputs, target = batch_items
    elif len(batch_items) == 3:
        inputs, target, additional_batch_items = batch_items
    else:
        raise UnsupportedBatchItemsFormat()
    return inputs, target, additional_batch_items
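
# Illustration of the two supported formats (hypothetical tensors, not part of the original file):
#
#   images, labels = torch.rand(4, 3, 32, 32), torch.randint(0, 10, (4,))
#   unpack_batch_items((images, labels))                                 # -> images, labels, {}
#   unpack_batch_items((images, labels, {"sample_ids": [0, 1, 2, 3]}))   # extras reachable via the phase context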
def log_uncaught_exceptions(logger):
    """Makes logger log uncaught exceptions

    @param logger: logging.Logger
    @return: None
    """

    def handle_exception(exc_type, exc_value, exc_traceback):
        if issubclass(exc_type, KeyboardInterrupt):
            sys.__excepthook__(exc_type, exc_value, exc_traceback)
            return
        logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))

    sys.excepthook = handle_exception
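
# Typical wiring (sketch): install the hook once at process start so that crashes land in
# the experiment log rather than only on stderr. KeyboardInterrupt keeps the default handling.
#
#   log_uncaught_exceptions(logger)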
def parse_args(cfg, arg_names: Union[List[str], Callable]) -> dict:
    """Parse args from a config.

    Unlike get_param(), here only parameters that appear in the config will override default params
    from the function's signature.
    """
    if not isinstance(arg_names, Sequence):
        arg_names = list(inspect.signature(arg_names).parameters.keys())

    kwargs_dict = {}
    for arg_name in arg_names:
        if hasattr(cfg, arg_name) and getattr(cfg, arg_name) is not None:
            kwargs_dict[arg_name] = getattr(cfg, arg_name)
    return kwargs_dict
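
# A minimal sketch of how parse_args might be used (hypothetical cfg and function, for illustration):
#
#   def build_optimizer(lr=1e-3, weight_decay=0.0): ...
#
#   class Cfg:
#       lr = 0.01                                     # only 'lr' appears in the config
#
#   overrides = parse_args(Cfg, build_optimizer)      # -> {'lr': 0.01}
#   build_optimizer(**overrides)                      # weight_decay keeps its signature default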