#598 Feature/sg 132 models convert

Merged
Ghost merged 1 commit into Deci-AI:master from deci-ai:feature/SG-132_models_convert
@@ -56,6 +56,7 @@ class Transforms:
     RandAugmentTransform = "RandAugmentTransform"
     Lighting = "Lighting"
     RandomErase = "RandomErase"
+    Standardize = "Standardize"
 
     # From torch
     Compose = "Compose"
    1. """
    2. Example code for running SuperGradient's recipes.
    3. General use: python convert_recipe_example.py --config-name=DESIRED_RECIPE'S_CONVERSION_PARAMS experiment_name=DESIRED_RECIPE'S_EXPERIMENT_NAME.
    4. For more optoins see : super_gradients/recipes/conversion_params/default_conversion_params.yaml.
    5. Note: conversion_params yaml file should reside under super_gradients/recipes/conversion_params
    6. """
    7. from omegaconf import DictConfig
    8. import hydra
    9. import pkg_resources
    10. from super_gradients import init_trainer
    11. from super_gradients.training import models
    12. @hydra.main(config_path=pkg_resources.resource_filename("super_gradients.recipes.conversion_params", ""), version_base="1.2")
    13. def main(cfg: DictConfig) -> None:
    14. # INSTANTIATE ALL OBJECTS IN CFG
    15. models.convert_from_config(cfg)
    16. def run():
    17. init_trainer()
    18. main()
    19. if __name__ == "__main__":
    20. run()
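
The hydra entry point can also be driven from a notebook or test without going through the CLI. A minimal sketch using hydra's compose API; the config name and experiment_name override are illustrative, not part of this PR:

from hydra import compose, initialize_config_dir
import pkg_resources

from super_gradients import init_trainer
from super_gradients.training import models

init_trainer()
# Same config directory the @hydra.main decorator above points at.
config_dir = pkg_resources.resource_filename("super_gradients.recipes.conversion_params", "")
with initialize_config_dir(config_dir=config_dir, version_base="1.2"):
    cfg = compose(config_name="cifar10_conversion_params", overrides=["experiment_name=resnet18_cifar"])
models.convert_from_config(cfg)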
@@ -30,7 +30,8 @@ architecture: resnet18_cifar
 
 experiment_name: resnet18_cifar
 
-
+multi_gpu: Off
+num_gpus: 1
 # THE FOLLOWING PARAMS ARE DIRECTLY USED BY HYDRA
 hydra:
   run:
super_gradients/recipes/conversion_params/cifar10_conversion_params.yaml

# Example conversion parameters, to be used with super_gradients/examples/convert_recipe_example/convert_recipe_example.py
# Suppose you trained cifar10_resnet using train_from_recipe beforehand. Then:
# python convert_recipe_example.py --config-name=cifar10_conversion_params experiment_name=YOUR_EXPERIMENT_NAME
# Alternatively (or if ckpts are located anywhere other than the default checkpoints dir), you can give the full checkpoint path:
# python convert_recipe_example.py --config-name=cifar10_conversion_params checkpoint_path=YOUR_CHECKPOINT_PATH

defaults:
  - default_conversion_params
  - _self_

experiment_name: resnet18_cifar  # The experiment name used to train the model (optional; ignored when checkpoint_path is given)

# CONVERSION RELATED PARAMS
out_path:  # str, destination path for the .onnx file. When None, out_path will be the resolved checkpoint path with its .ckpt suffix replaced by .onnx.
input_shape:  # Input shape, not including batch_size. Always channels-first (e.g. (3, 224, 224)).
  - 3
  - 32
  - 32
pre_process:  # Preprocessing pipeline, resolved by TransformsFactory() and baked into the converted model (optional).
  Compose:
    transforms:
      - Standardize
      - Normalize:
          mean:
            - 0.4914
            - 0.4822
            - 0.4465
          std:
            - 0.2023
            - 0.1994
            - 0.2010
post_process:  # Postprocessing pipeline, resolved by TransformsFactory() and baked into the converted model (optional).
prep_model_for_conversion_kwargs:  # For SgModules, args to be passed to model.prep_model_for_conversion prior to the torch.onnx.export call.
torch_onnx_export_kwargs:  # kwargs (EXCLUDING the first 3: model, f, args) to be unpacked in the torch.onnx.export call.
  opset_version: 16
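
For intuition, the pre_process block above is resolved by TransformsFactory() into roughly the following torchvision pipeline, which convert_to_onnx then bakes into the exported graph. This is a hand-written sketch of the resolved object, not output copied from the factory:

from torchvision.transforms import Compose, Normalize
from super_gradients.training.transforms.transforms import Standardize

# Approximately what the yaml's pre_process section resolves to:
pre_process = Compose(
    [
        Standardize(),  # divide by 255.0; the transform added in this PR
        Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
    ]
)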
super_gradients/recipes/conversion_params/default_conversion_params.yaml

experiment_name:  # The experiment name used to train the model (optional; ignored when checkpoint_path is given)
ckpt_root_dir:  # The checkpoint root directory, such that ckpt_root_dir/experiment_name/ckpt_name resides.
# Can be ignored if the checkpoints directory is the default (i.e. the path to the checkpoints module from the contents root), or when checkpoint_path is given.
ckpt_name: ckpt_best.pth  # Name of the checkpoint to export ("ckpt_latest.pth", "average_model.pth" or "ckpt_best.pth", for instance).
checkpoint_path:
strict_load: no_key_matching  # One of [On, Off, no_key_matching] (case insensitive). See super_gradients/common/data_types/enum/strict_load.py.

# NOTES ON ckpt_root_dir, checkpoint_path, and ckpt_name:
# - ckpt_root_dir, experiment_name and ckpt_name are only used when checkpoint_path is None.
# - When checkpoint_path is None, the model will be built according to the output yaml config inside ckpt_root_dir/experiment_name/ckpt_name.
#   Also note that in this case it is also legal not to pass ckpt_root_dir, which will then be resolved to the default SG checkpoints dir.

# CONVERSION RELATED PARAMS
out_path:  # str, destination path for the .onnx file. When None, it will be set to checkpoint_path.replace(".ckpt", ".onnx").
input_shape:  # Input shape, not including batch_size. Always channels-first (e.g. (3, 224, 224)).
pre_process:  # Preprocessing pipeline, resolved by TransformsFactory() and baked into the converted model (optional).
post_process:  # Postprocessing pipeline, resolved by TransformsFactory() and baked into the converted model (optional).
prep_model_for_conversion_kwargs:  # For SgModules, args to be passed to model.prep_model_for_conversion prior to the torch.onnx.export call.
torch_onnx_export_kwargs:  # kwargs (EXCLUDING the first 3: model, f, args) to be unpacked in the torch.onnx.export call.
super_gradients/training/models/__init__.py

@@ -21,3 +21,4 @@ from super_gradients.training.models.all_architectures import ARCHITECTURES, Mod
 from super_gradients.training.models.user_models import *
 from super_gradients.training.models.model_factory import get
 from super_gradients.training.models.arch_params_factory import get_arch_params
+from super_gradients.training.models.conversion import convert_to_onnx, convert_from_config
super_gradients/training/models/conversion.py

from pathlib import Path

import hydra
import torch
from omegaconf import DictConfig
import numpy as np
from torch.nn import Identity

from super_gradients.common.abstractions.abstract_logger import get_logger
from super_gradients.common.decorators.factory_decorator import resolve_param
from super_gradients.common.factories.transforms_factory import TransformsFactory
from super_gradients.training import models
from super_gradients.training.utils.checkpoint_utils import get_checkpoints_dir_path
from super_gradients.training.utils.hydra_utils import load_experiment_cfg
from super_gradients.training.utils.sg_trainer_utils import parse_args
import os
import pathlib

logger = get_logger(__name__)


class ConvertableCompletePipelineModel(torch.nn.Module):
    """
    Exportable nn.Module that wraps the model, preprocessing and postprocessing.

    Args:
        model: torch.nn.Module, the main model. Takes input from pre_process's output, and feeds post_process.
        pre_process: torch.nn.Module, preprocessing module; its output will be the model's input. When None (default), set to Identity().
        post_process: torch.nn.Module, postprocessing module; its output is the final output. When None (default), set to Identity().
        **prep_model_for_conversion_kwargs: for SgModules, args to be passed to model.prep_model_for_conversion
            prior to the torch.onnx.export call.
    """

    def __init__(self, model: torch.nn.Module, pre_process: torch.nn.Module = None, post_process: torch.nn.Module = None, **prep_model_for_conversion_kwargs):
        super(ConvertableCompletePipelineModel, self).__init__()
        model.eval()
        pre_process = pre_process or Identity()
        post_process = post_process or Identity()
        if hasattr(model, "prep_model_for_conversion"):
            model.prep_model_for_conversion(**prep_model_for_conversion_kwargs)
        self.model = model
        self.pre_process = pre_process
        self.post_process = post_process

    def forward(self, x):
        return self.post_process(self.model(self.pre_process(x)))


@resolve_param("pre_process", TransformsFactory())
@resolve_param("post_process", TransformsFactory())
def convert_to_onnx(
    model: torch.nn.Module,
    out_path: str,
    input_shape: tuple,
    pre_process: torch.nn.Module = None,
    post_process: torch.nn.Module = None,
    prep_model_for_conversion_kwargs=None,
    torch_onnx_export_kwargs=None,
):
    """
    Exports model to ONNX.

    :param model: torch.nn.Module, model to export to ONNX.
    :param out_path: str, destination path for the .onnx file.
    :param input_shape: tuple, input shape, excluding batch_size (e.g. (3, 224, 224)).
    :param pre_process: torch.nn.Module, preprocessing pipeline, will be resolved by TransformsFactory().
    :param post_process: torch.nn.Module, postprocessing pipeline, will be resolved by TransformsFactory().
    :param prep_model_for_conversion_kwargs: dict, for SgModules, args to be passed to model.prep_model_for_conversion
        prior to the torch.onnx.export call.
    :param torch_onnx_export_kwargs: kwargs (EXCLUDING the first 3: model, f, args) to be unpacked in the torch.onnx.export call.
    :return: out_path
    """
    if not os.path.isdir(pathlib.Path(out_path).parent.resolve()):
        raise FileNotFoundError(f"Could not find destination directory {out_path} for the ONNX file.")
    torch_onnx_export_kwargs = torch_onnx_export_kwargs or dict()
    prep_model_for_conversion_kwargs = prep_model_for_conversion_kwargs or dict()
    onnx_input = torch.Tensor(np.zeros([1, *input_shape]))
    if not out_path.endswith(".onnx"):
        out_path = out_path + ".onnx"
    complete_model = ConvertableCompletePipelineModel(model, pre_process, post_process, **prep_model_for_conversion_kwargs)
    torch.onnx.export(model=complete_model, args=onnx_input, f=out_path, **torch_onnx_export_kwargs)
    return out_path


def prepare_conversion_cfgs(cfg: DictConfig):
    """
    Builds the cfg (i.e. conversion_params) and experiment_cfg (i.e. the recipe config according to cfg.experiment_name)
    to be used by convert_recipe_example.

    :param cfg: DictConfig, conversion_params config
    :return: cfg, experiment_cfg
    """
    cfg = hydra.utils.instantiate(cfg)
    # CREATE THE EXPERIMENT CFG
    experiment_cfg = load_experiment_cfg(cfg.experiment_name, cfg.ckpt_root_dir)
    hydra.utils.instantiate(experiment_cfg)
    if cfg.checkpoint_path is None:
        logger.info(
            "checkpoint_params.checkpoint_path was not provided, so the model will be converted using weights from "
            "checkpoints_dir/training_hyperparams.ckpt_name "
        )
        checkpoints_dir = Path(get_checkpoints_dir_path(experiment_name=cfg.experiment_name, ckpt_root_dir=cfg.ckpt_root_dir))
        cfg.checkpoint_path = str(checkpoints_dir / cfg.ckpt_name)
    cfg.out_path = cfg.out_path or cfg.checkpoint_path.replace(".ckpt", ".onnx")
    logger.info(f"Exporting checkpoint: {cfg.checkpoint_path} to ONNX.")
    return cfg, experiment_cfg


def convert_from_config(cfg: DictConfig) -> str:
    """
    Exports model according to cfg.

    See:
        super_gradients/recipes/conversion_params/default_conversion_params.yaml for the full cfg content documentation,
        and super_gradients/examples/convert_recipe_example/convert_recipe_example.py for usage.

    :param cfg:
    :return: out_path, the path of the saved .onnx file.
    """
    cfg, experiment_cfg = prepare_conversion_cfgs(cfg)
    model = models.get(
        model_name=experiment_cfg.architecture,
        num_classes=experiment_cfg.arch_params.num_classes,
        arch_params=experiment_cfg.arch_params,
        strict_load=cfg.strict_load,
        checkpoint_path=cfg.checkpoint_path,
    )
    cfg = parse_args(cfg, models.convert_to_onnx)
    out_path = models.convert_to_onnx(model=model, **cfg)
    return out_path
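
Besides the recipe-driven path, convert_to_onnx can also be called directly on an in-memory model. A minimal sketch, assuming a trained classifier; the model name, class count and paths are illustrative only:

import torch
from super_gradients.training import models

# Hypothetical checkpoint and output locations, for illustration.
model = models.get(
    model_name="resnet18_cifar",
    num_classes=10,
    checkpoint_path="checkpoints/resnet18_cifar/ckpt_best.pth",
)
out_path = models.convert_to_onnx(
    model=model,
    out_path="checkpoints/resnet18_cifar/ckpt_best.onnx",
    input_shape=(3, 32, 32),
    torch_onnx_export_kwargs={"opset_version": 16},
)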
super_gradients/training/transforms/__init__.py

@@ -6,6 +6,7 @@ from super_gradients.training.transforms.transforms import (
     DetectionHSV,
     DetectionPaddedRescale,
     DetectionTargetsFormatTransform,
+    Standardize,
 )
 from super_gradients.training.transforms.all_transforms import (
     TRANSFORMS,
@@ -26,6 +27,7 @@ __all__ = [
     "DetectionPaddedRescale",
     "DetectionTargetsFormatTransform",
     "imported_albumentations_failure",
+    "Standardize",
 ]
 
 cv2.setNumThreads(0)
super_gradients/training/transforms/all_transforms.py

@@ -25,6 +25,7 @@ from super_gradients.training.transforms.transforms import (
     DetectionTargetsFormat,
     DetectionPaddedRescale,
     DetectionTargetsFormatTransform,
+    Standardize,
 )
 from torchvision.transforms import (
     Compose,
@@ -123,6 +124,7 @@ TRANSFORMS = {
     Transforms.RandomAdjustSharpness: RandomAdjustSharpness,
     Transforms.RandomAutocontrast: RandomAutocontrast,
     Transforms.RandomEqualize: RandomEqualize,
+    Transforms.Standardize: Standardize,
 }
 logger = get_logger(__name__)
super_gradients/training/transforms/transforms.py

@@ -3,6 +3,7 @@ import math
 import random
 from typing import Optional, Union, Tuple, List, Sequence, Dict
 
+import torch.nn
 from PIL import Image, ImageFilter, ImageOps
 from torchvision import transforms as transforms
 import numpy as np
@@ -1104,3 +1105,20 @@ def rescale_and_pad_to_size(img, input_size, swap=(2, 0, 1), pad_val=114):
     padded_img = padded_img.transpose(swap)
     padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
     return padded_img, r
+
+
+class Standardize(torch.nn.Module):
+    """
+    Standardize image pixel values by dividing by a constant.
+    :return: img / max_val
+
+    Attributes:
+        max_val: float, the divisor described above (default=255.0)
+    """
+
+    def __init__(self, max_val=255.0):
+        super(Standardize, self).__init__()
+        self.max_val = max_val
+
+    def forward(self, img):
+        return img / self.max_val
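
A quick usage note: the transform simply rescales pixel intensities from [0, 255] to [0, 1], so it can sit in front of Normalize in an export-time pipeline. A minimal sketch with a simulated image tensor:

import torch
from super_gradients.training.transforms.transforms import Standardize

# Simulated 8-bit image in CHW layout.
img = torch.randint(0, 256, (3, 32, 32)).float()
out = Standardize()(img)
assert 0.0 <= out.min() and out.max() <= 1.0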