Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

strictload_enum_test.py 6.5 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
  1. import os
  2. import shutil
  3. import tempfile
  4. import unittest
  5. from super_gradients.common.object_names import Models
  6. from super_gradients.common.sg_loggers.base_sg_logger import BaseSGLogger
  7. from super_gradients.training import Trainer
  8. import torch
  9. import torch.nn as nn
  10. import torch.nn.functional as F
  11. from super_gradients.training import models
  12. from super_gradients.training.sg_trainer.sg_trainer import StrictLoad
  13. from super_gradients.training.utils import HpmStruct
  14. class Net(nn.Module):
  15. def __init__(self):
  16. super(Net, self).__init__()
  17. self.conv1 = nn.Conv2d(3, 6, 3)
  18. self.pool = nn.MaxPool2d(2, 2)
  19. self.conv2 = nn.Conv2d(6, 16, 3)
  20. self.fc1 = nn.Linear(16 * 3 * 3, 120)
  21. self.fc2 = nn.Linear(120, 84)
  22. self.fc3 = nn.Linear(84, 10)
  23. def forward(self, x):
  24. x = self.pool(F.relu(self.conv1(x)))
  25. x = self.pool(F.relu(self.conv2(x)))
  26. x = x.view(-1, 16 * 3 * 3)
  27. x = F.relu(self.fc1(x))
  28. x = F.relu(self.fc2(x))
  29. x = self.fc3(x)
  30. return x
  31. class StrictLoadEnumTest(unittest.TestCase):
  32. @classmethod
  33. def setUpClass(cls):
  34. cls.temp_working_file_dir = tempfile.TemporaryDirectory(prefix="strict_load_test").name
  35. if not os.path.isdir(cls.temp_working_file_dir):
  36. os.mkdir(cls.temp_working_file_dir)
  37. cls.experiment_name = "load_checkpoint_test"
  38. cls.checkpoint_diff_keys_name = "strict_load_test_diff_keys.pth"
  39. cls.checkpoint_diff_keys_path = cls.temp_working_file_dir + "/" + cls.checkpoint_diff_keys_name
  40. # Setup the model
  41. cls.original_torch_model = Net()
  42. # Save the model's state_dict checkpoint with different keys
  43. torch.save(cls.change_state_dict_keys(cls.original_torch_model.state_dict()), cls.checkpoint_diff_keys_path)
  44. # Save the model's state_dict checkpoint in Trainer format
  45. cls.trainer = Trainer("load_checkpoint_test") # Saves in /checkpoints
  46. cls.trainer.set_net(cls.original_torch_model)
  47. # FIXME: after uniting init and build_model we should remove this
  48. cls.trainer.sg_logger = BaseSGLogger(
  49. "project_name",
  50. "load_checkpoint_test",
  51. "local",
  52. resumed=False,
  53. training_params=HpmStruct(max_epochs=10),
  54. checkpoints_dir_path=cls.trainer.checkpoints_dir_path,
  55. monitor_system=False,
  56. )
  57. cls.trainer._save_checkpoint()
  58. @classmethod
  59. def tearDownClass(cls):
  60. if os.path.isdir(cls.temp_working_file_dir):
  61. shutil.rmtree(cls.temp_working_file_dir)
  62. @classmethod
  63. def change_state_dict_keys(self, state_dict):
  64. new_ckpt_dict = {}
  65. for i, (ckpt_key, ckpt_val) in enumerate(state_dict.items()):
  66. new_ckpt_dict[str(i)] = ckpt_val
  67. return new_ckpt_dict
  68. def check_models_have_same_weights(self, model_1, model_2):
  69. model_1, model_2 = model_1.to("cpu"), model_2.to("cpu")
  70. models_differ = 0
  71. for key_item_1, key_item_2 in zip(model_1.state_dict().items(), model_2.state_dict().items()):
  72. if torch.equal(key_item_1[1], key_item_2[1]):
  73. pass
  74. else:
  75. models_differ += 1
  76. if key_item_1[0] == key_item_2[0]:
  77. print("Mismtach found at", key_item_1[0])
  78. else:
  79. raise Exception
  80. if models_differ == 0:
  81. return True
  82. else:
  83. return False
  84. def test_strict_load_on(self):
  85. # Define Model
  86. model = models.get(Models.RESNET18, arch_params={"num_classes": 1000})
  87. pretrained_model = models.get(Models.RESNET18, arch_params={"num_classes": 1000}, pretrained_weights="imagenet")
  88. # Make sure we initialized a model with different weights
  89. assert not self.check_models_have_same_weights(model, pretrained_model)
  90. pretrained_sd_path = os.path.join(self.temp_working_file_dir, "pretrained_net_strict_load_on.pth")
  91. torch.save(pretrained_model.state_dict(), pretrained_sd_path)
  92. model = models.get(Models.RESNET18, arch_params={"num_classes": 1000}, checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.ON)
  93. # Assert the weights were loaded correctly
  94. assert self.check_models_have_same_weights(model, pretrained_model)
  95. def test_strict_load_off(self):
  96. # Define Model
  97. model = models.get(Models.RESNET18, arch_params={"num_classes": 1000})
  98. pretrained_model = models.get(Models.RESNET18, arch_params={"num_classes": 1000}, pretrained_weights="imagenet")
  99. # Make sure we initialized a model with different weights
  100. assert not self.check_models_have_same_weights(model, pretrained_model)
  101. pretrained_sd_path = os.path.join(self.temp_working_file_dir, "pretrained_net_strict_load_off.pth")
  102. del pretrained_model.linear
  103. torch.save(pretrained_model.state_dict(), pretrained_sd_path)
  104. with self.assertRaises(RuntimeError):
  105. models.get(Models.RESNET18, arch_params={"num_classes": 1000}, checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.ON)
  106. model = models.get(Models.RESNET18, arch_params={"num_classes": 1000}, checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.OFF)
  107. del model.linear
  108. # Assert the weights were loaded correctly
  109. assert self.check_models_have_same_weights(model, pretrained_model)
  110. def test_strict_load_no_key_matching_sg_checkpoint(self):
  111. # Define Model
  112. model = models.get(Models.RESNET18, arch_params={"num_classes": 1000})
  113. pretrained_model = models.get(Models.RESNET18, arch_params={"num_classes": 1000}, pretrained_weights="imagenet")
  114. # Make sure we initialized a model with different weights
  115. assert not self.check_models_have_same_weights(model, pretrained_model)
  116. pretrained_sd_path = os.path.join(self.temp_working_file_dir, "pretrained_model_strict_load_soft.pth")
  117. torch.save(self.change_state_dict_keys(pretrained_model.state_dict()), pretrained_sd_path)
  118. with self.assertRaises(RuntimeError):
  119. models.get(Models.RESNET18, arch_params={"num_classes": 1000}, checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.ON)
  120. model = models.get(Models.RESNET18, arch_params={"num_classes": 1000}, checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.NO_KEY_MATCHING)
  121. # Assert the weights were loaded correctly
  122. assert self.check_models_have_same_weights(model, pretrained_model)
if __name__ == "__main__":
    # Allow running this test module directly, outside of a test runner.
    unittest.main()
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...