strictload_enum_test.py
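Unit tests for SuperGradients' StrictLoad checkpoint-loading modes. setUpClass builds a small CNN and saves checkpoints in both raw state_dict and Trainer formats; the tests then verify that StrictLoad.ON rejects checkpoints with missing or renamed keys, StrictLoad.OFF tolerates a checkpoint with a deleted layer, and StrictLoad.NO_KEY_MATCHING loads the weights even after every state_dict key has been replaced.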

import os
import shutil
import tempfile
import unittest

import torch
import torch.nn as nn
import torch.nn.functional as F

from super_gradients.common.sg_loggers import BaseSGLogger
from super_gradients.training import Trainer
from super_gradients.training import models
from super_gradients.training.sg_trainer.sg_trainer import StrictLoad
from super_gradients.training.utils import HpmStruct

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 3)
        self.fc1 = nn.Linear(16 * 3 * 3, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 3 * 3)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

class StrictLoadEnumTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.temp_working_file_dir = tempfile.TemporaryDirectory(prefix='strict_load_test').name
        if not os.path.isdir(cls.temp_working_file_dir):
            os.mkdir(cls.temp_working_file_dir)
        cls.experiment_name = 'load_checkpoint_test'
        cls.checkpoint_diff_keys_name = 'strict_load_test_diff_keys.pth'
        cls.checkpoint_diff_keys_path = cls.temp_working_file_dir + '/' + cls.checkpoint_diff_keys_name

        # Setup the model
        cls.original_torch_model = Net()

        # Save the model's state_dict checkpoint with different keys
        torch.save(cls.change_state_dict_keys(cls.original_torch_model.state_dict()), cls.checkpoint_diff_keys_path)

        # Save the model's state_dict checkpoint in Trainer format
        cls.trainer = Trainer("load_checkpoint_test", model_checkpoints_location='local')  # Saves in /checkpoints
        cls.trainer.set_net(cls.original_torch_model)
        # FIXME: after uniting init and build_model we should remove this
        cls.trainer.sg_logger = BaseSGLogger('project_name', 'load_checkpoint_test', 'local', resumed=False,
                                             training_params=HpmStruct(max_epochs=10),
                                             checkpoints_dir_path=cls.trainer.checkpoints_dir_path)
        cls.trainer._save_checkpoint()

    @classmethod
    def tearDownClass(cls):
        if os.path.isdir(cls.temp_working_file_dir):
            shutil.rmtree(cls.temp_working_file_dir)

    @classmethod
    def change_state_dict_keys(cls, state_dict):
        # Re-key the state_dict with running integer keys so the checkpoint no longer
        # matches the model's parameter names.
        new_ckpt_dict = {}
        for i, (ckpt_key, ckpt_val) in enumerate(state_dict.items()):
            new_ckpt_dict[str(i)] = ckpt_val
        return new_ckpt_dict

    def check_models_have_same_weights(self, model_1, model_2):
        # Returns True only if every corresponding tensor in the two state_dicts is equal.
        model_1, model_2 = model_1.to('cpu'), model_2.to('cpu')
        models_differ = 0
        for key_item_1, key_item_2 in zip(model_1.state_dict().items(), model_2.state_dict().items()):
            if torch.equal(key_item_1[1], key_item_2[1]):
                pass
            else:
                models_differ += 1
                if key_item_1[0] == key_item_2[0]:
                    print('Mismatch found at', key_item_1[0])
                else:
                    raise Exception
        if models_differ == 0:
            return True
        else:
            return False

    def test_strict_load_on(self):
        # Define Model
        model = models.get('resnet18', arch_params={"num_classes": 1000})
        pretrained_model = models.get('resnet18', arch_params={"num_classes": 1000},
                                      pretrained_weights="imagenet")

        # Make sure we initialized a model with different weights
        assert not self.check_models_have_same_weights(model, pretrained_model)

        pretrained_sd_path = os.path.join(self.temp_working_file_dir, "pretrained_net_strict_load_on.pth")
        torch.save(pretrained_model.state_dict(), pretrained_sd_path)

        model = models.get('resnet18', arch_params={"num_classes": 1000},
                           checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.ON)

        # Assert the weights were loaded correctly
        assert self.check_models_have_same_weights(model, pretrained_model)

    def test_strict_load_off(self):
        # Define Model
        model = models.get('resnet18', arch_params={"num_classes": 1000})
        pretrained_model = models.get('resnet18', arch_params={"num_classes": 1000},
                                      pretrained_weights="imagenet")

        # Make sure we initialized a model with different weights
        assert not self.check_models_have_same_weights(model, pretrained_model)

        pretrained_sd_path = os.path.join(self.temp_working_file_dir, "pretrained_net_strict_load_off.pth")
        # Drop the final linear layer so the saved checkpoint is missing keys
        del pretrained_model.linear
        torch.save(pretrained_model.state_dict(), pretrained_sd_path)

        # StrictLoad.ON must fail on the incomplete checkpoint
        with self.assertRaises(RuntimeError):
            models.get('resnet18', arch_params={"num_classes": 1000},
                       checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.ON)

        # StrictLoad.OFF loads whatever keys are present
        model = models.get('resnet18', arch_params={"num_classes": 1000},
                           checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.OFF)
        del model.linear

        # Assert the weights were loaded correctly
        assert self.check_models_have_same_weights(model, pretrained_model)

    def test_strict_load_no_key_matching_sg_checkpoint(self):
        # Define Model
        model = models.get('resnet18', arch_params={"num_classes": 1000})
        pretrained_model = models.get('resnet18', arch_params={"num_classes": 1000},
                                      pretrained_weights="imagenet")

        # Make sure we initialized a model with different weights
        assert not self.check_models_have_same_weights(model, pretrained_model)

        pretrained_sd_path = os.path.join(self.temp_working_file_dir, "pretrained_model_strict_load_soft.pth")
        # Save a checkpoint whose keys do not match the model's parameter names
        torch.save(self.change_state_dict_keys(pretrained_model.state_dict()), pretrained_sd_path)

        # StrictLoad.ON must fail on mismatched keys
        with self.assertRaises(RuntimeError):
            models.get('resnet18', arch_params={"num_classes": 1000},
                       checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.ON)

        # StrictLoad.NO_KEY_MATCHING ignores the keys and loads the weights anyway
        model = models.get('resnet18', arch_params={"num_classes": 1000},
                           checkpoint_path=pretrained_sd_path, strict_load=StrictLoad.NO_KEY_MATCHING)

        # Assert the weights were loaded correctly
        assert self.check_models_have_same_weights(model, pretrained_model)

if __name__ == '__main__':
    unittest.main()
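
For reference, a minimal usage sketch based on the calls exercised above (the checkpoint path is a placeholder, and the models.get signature is assumed to match the super_gradients version these tests target):

from super_gradients.training import models
from super_gradients.training.sg_trainer.sg_trainer import StrictLoad

# Load an external state_dict whose keys do not match the model's parameter names,
# as in test_strict_load_no_key_matching_sg_checkpoint above.
model = models.get('resnet18', arch_params={"num_classes": 1000},
                   checkpoint_path="/path/to/checkpoint.pth",  # placeholder path
                   strict_load=StrictLoad.NO_KEY_MATCHING)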