#869 Add DagsHub Logger to Super Gradients

Merged
Ghost merged 1 commit into Deci-AI:master from timho102003:dagshub_logger
import torch
import torchmetrics
from torchmetrics import Metric
from super_gradients.common.object_names import Metrics
from super_gradients.common.registry.registry import register_metric
from super_gradients.training.utils import convert_to_tensor


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k

    :param output: Tensor / Numpy / List
        The prediction
    :param target: Tensor / Numpy / List
        The corresponding labels
    :param topk: tuple
        The type of accuracy to calculate, e.g. topk=(1,5) returns accuracy for top-1 and top-5
    """
    # Convert to tensor
    output = convert_to_tensor(output)
    target = convert_to_tensor(target)

    # Get the maximal value of the accuracy measurement and the batch size
    maxk = max(topk)
    batch_size = target.size(0)

    # Get the top k predictions
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    # Count the number of correct predictions only for the highest k
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # Count the number of correct predictions for the different k (the top predictions) values
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size).item())
    return res


@register_metric(Metrics.ACCURACY)
class Accuracy(torchmetrics.Accuracy):
    def __init__(self, dist_sync_on_step=False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.greater_is_better = True

    def update(self, preds: torch.Tensor, target: torch.Tensor):
        if target.shape == preds.shape:
            target = target.argmax(1)  # supports smooth labels
        super().update(preds=preds.argmax(1), target=target)


@register_metric(Metrics.TOP5)
class Top5(Metric):
    def __init__(self, dist_sync_on_step=False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.greater_is_better = True

        self.add_state("correct", default=torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")

    def update(self, preds: torch.Tensor, target: torch.Tensor):
        if target.shape == preds.shape:
            target = target.argmax(1)  # supports smooth labels

        # Get the batch size
        batch_size = target.size(0)

        # Get the top 5 predictions
        _, pred = preds.topk(5, 1, True, True)
        pred = pred.t()
        # Count the number of correct predictions only for the highest 5
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        correct5 = correct[:5].reshape(-1).float().sum(0)
        self.correct += correct5
        self.total += batch_size

    def compute(self):
        return self.correct.float() / self.total


class ToyTestClassificationMetric(Metric):
    """
    Dummy classification Metric object always returning 0 (for testing).
    """

    def __init__(self, dist_sync_on_step=False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)

    def update(self, preds: torch.Tensor, target: torch.Tensor) -> None:
        pass

    def compute(self):
        return 0
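
For reviewers who want to exercise the metrics above, here is a minimal sanity check. It is a sketch, assuming it runs in the same module as the file above so that accuracy and Top5 are in scope; the tensors are random and purely illustrative.

import torch

# Eight samples over ten classes; random logits, purely illustrative.
preds = torch.randn(8, 10)
target = torch.randint(0, 10, (8,))

# Functional form: one entry per requested k, reported in percent.
top1, top5 = accuracy(preds, target, topk=(1, 5))

# Stateful form: accumulate across batches, then reduce.
metric = Top5()
metric.update(preds, target)
top5_fraction = metric.compute()  # a fraction in [0, 1], not a percent

Note the asymmetry: the accuracy helper scales by 100.0 / batch_size and so returns percentages, while Top5.compute returns correct / total as a plain fraction.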