#12 Implement MLflow and mmdetection

Merged
Ghost merged 1 commit into vdanilov:main from vdanilov:mmdetection_pipeline
100 changed files with 7,562 additions and 1 deletion
1. .gitignore (+3, -0)
2. calculations/.gitkeep (+0, -0)
3. calculations/visualization/abs_hsv/2019_08_30_09_48_23_SpecCube.mp4 (BIN)
4. requirements.txt (+5, -1)
5. src/models/mmdetection/.circleci/config.yml (+34, -0)
6. src/models/mmdetection/.circleci/docker/Dockerfile (+11, -0)
7. src/models/mmdetection/.circleci/scripts/get_mmcv_var.sh (+19, -0)
8. src/models/mmdetection/.circleci/test.yml (+189, -0)
9. src/models/mmdetection/.dev_scripts/batch_test_list.py (+359, -0)
10. src/models/mmdetection/.dev_scripts/batch_train_list.txt (+66, -0)
11. src/models/mmdetection/.dev_scripts/benchmark_filter.py (+167, -0)
12. src/models/mmdetection/.dev_scripts/benchmark_inference_fps.py (+170, -0)
13. src/models/mmdetection/.dev_scripts/benchmark_test_image.py (+102, -0)
14. src/models/mmdetection/.dev_scripts/check_links.py (+157, -0)
15. src/models/mmdetection/.dev_scripts/convert_test_benchmark_script.py (+119, -0)
16. src/models/mmdetection/.dev_scripts/convert_train_benchmark_script.py (+99, -0)
17. src/models/mmdetection/.dev_scripts/gather_models.py (+342, -0)
18. src/models/mmdetection/.dev_scripts/gather_test_benchmark_metric.py (+96, -0)
19. src/models/mmdetection/.dev_scripts/gather_train_benchmark_metric.py (+150, -0)
20. src/models/mmdetection/.dev_scripts/linter.sh (+3, -0)
21. src/models/mmdetection/.dev_scripts/test_benchmark.sh (+119, -0)
22. src/models/mmdetection/.dev_scripts/test_init_backbone.py (+181, -0)
23. src/models/mmdetection/.dev_scripts/train_benchmark.sh (+134, -0)
24. src/models/mmdetection/.github/CODE_OF_CONDUCT.md (+76, -0)
25. src/models/mmdetection/.github/CONTRIBUTING.md (+1, -0)
26. src/models/mmdetection/.github/ISSUE_TEMPLATE/1-bug-report.yml (+105, -0)
27. src/models/mmdetection/.github/ISSUE_TEMPLATE/2-feature-request.yml (+31, -0)
28. src/models/mmdetection/.github/ISSUE_TEMPLATE/3-new-model.yml (+32, -0)
29. src/models/mmdetection/.github/ISSUE_TEMPLATE/4-documentation.yml (+34, -0)
30. src/models/mmdetection/.github/ISSUE_TEMPLATE/5-reimplementation.yml (+89, -0)
31. src/models/mmdetection/.github/ISSUE_TEMPLATE/config.yml (+9, -0)
32. src/models/mmdetection/.github/pull_request_template.md (+25, -0)
33. src/models/mmdetection/.github/workflows/build.yml (+286, -0)
34. src/models/mmdetection/.github/workflows/build_pat.yml (+31, -0)
35. src/models/mmdetection/.github/workflows/deploy.yml (+31, -0)
36. src/models/mmdetection/.github/workflows/lint.yml (+30, -0)
37. src/models/mmdetection/.github/workflows/stale.yml (+31, -0)
38. src/models/mmdetection/.github/workflows/test_mim.yml (+50, -0)
39. src/models/mmdetection/.gitignore (+124, -0)
40. src/models/mmdetection/.owners.yml (+13, -0)
41. src/models/mmdetection/.pre-commit-config.yaml (+50, -0)
42. src/models/mmdetection/.readthedocs.yml (+9, -0)
43. src/models/mmdetection/CITATION.cff (+8, -0)
44. src/models/mmdetection/LICENSE (+203, -0)
45. src/models/mmdetection/MANIFEST.in (+6, -0)
46. src/models/mmdetection/README.md (+395, -0)
47. src/models/mmdetection/README_zh-CN.md (+413, -0)
48. src/models/mmdetection/configs/_base_/datasets/cityscapes_detection.py (+56, -0)
49. src/models/mmdetection/configs/_base_/datasets/cityscapes_instance.py (+56, -0)
50. src/models/mmdetection/configs/_base_/datasets/coco_detection.py (+49, -0)
51. src/models/mmdetection/configs/_base_/datasets/coco_instance.py (+49, -0)
52. src/models/mmdetection/configs/_base_/datasets/coco_instance_semantic.py (+54, -0)
53. src/models/mmdetection/configs/_base_/datasets/coco_panoptic.py (+59, -0)
54. src/models/mmdetection/configs/_base_/datasets/deepfashion.py (+53, -0)
55. src/models/mmdetection/configs/_base_/datasets/lvis_v0.5_instance.py (+24, -0)
56. src/models/mmdetection/configs/_base_/datasets/lvis_v1_instance.py (+24, -0)
57. src/models/mmdetection/configs/_base_/datasets/objects365v1_detection.py (+49, -0)
58. src/models/mmdetection/configs/_base_/datasets/objects365v2_detection.py (+49, -0)
59. src/models/mmdetection/configs/_base_/datasets/openimages_detection.py (+65, -0)
60. src/models/mmdetection/configs/_base_/datasets/voc0712.py (+55, -0)
61. src/models/mmdetection/configs/_base_/datasets/wider_face.py (+63, -0)
62. src/models/mmdetection/configs/_base_/default_runtime.py (+33, -0)
63. src/models/mmdetection/configs/_base_/models/ascend_retinanet_r50_fpn.py (+60, -0)
64. src/models/mmdetection/configs/_base_/models/ascend_ssd300.py (+56, -0)
65. src/models/mmdetection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py (+196, -0)
66. src/models/mmdetection/configs/_base_/models/cascade_rcnn_r50_fpn.py (+179, -0)
67. src/models/mmdetection/configs/_base_/models/fast_rcnn_r50_fpn.py (+62, -0)
68. src/models/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py (+117, -0)
69. src/models/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py (+105, -0)
70. src/models/mmdetection/configs/_base_/models/faster_rcnn_r50_fpn.py (+108, -0)
71. src/models/mmdetection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py (+125, -0)
72. src/models/mmdetection/configs/_base_/models/mask_rcnn_r50_fpn.py (+120, -0)
73. src/models/mmdetection/configs/_base_/models/retinanet_r50_fpn.py (+60, -0)
74. src/models/mmdetection/configs/_base_/models/rpn_r50_caffe_c4.py (+58, -0)
75. src/models/mmdetection/configs/_base_/models/rpn_r50_fpn.py (+58, -0)
76. src/models/mmdetection/configs/_base_/models/ssd300.py (+56, -0)
77. src/models/mmdetection/configs/_base_/schedules/schedule_1x.py (+11, -0)
78. src/models/mmdetection/configs/_base_/schedules/schedule_20e.py (+11, -0)
79. src/models/mmdetection/configs/_base_/schedules/schedule_2x.py (+11, -0)
80. src/models/mmdetection/configs/albu_example/README.md (+31, -0)
81. src/models/mmdetection/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py (+73, -0)
82. src/models/mmdetection/configs/atss/README.md (+31, -0)
83. src/models/mmdetection/configs/atss/atss_r101_fpn_1x_coco.py (+6, -0)
84. src/models/mmdetection/configs/atss/atss_r50_fpn_1x_coco.py (+62, -0)
85. src/models/mmdetection/configs/atss/metafile.yml (+60, -0)
86. src/models/mmdetection/configs/autoassign/README.md (+35, -0)
87. src/models/mmdetection/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py (+85, -0)
88. src/models/mmdetection/configs/autoassign/metafile.yml (+33, -0)
89. src/models/mmdetection/configs/carafe/README.md (+42, -0)
90. src/models/mmdetection/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py (+50, -0)
91. src/models/mmdetection/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py (+60, -0)
92. src/models/mmdetection/configs/carafe/metafile.yml (+55, -0)
93. src/models/mmdetection/configs/cascade_rcnn/README.md (+79, -0)
94. src/models/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py (+7, -0)
95. src/models/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py (+7, -0)
96. src/models/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py (+6, -0)
97. src/models/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py (+6, -0)
98. src/models/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py (+6, -0)
99. src/models/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py (+41, -0)
100. src/models/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py (+49, -0)
.gitignore:

@@ -163,3 +163,6 @@ logs/
 
 # Directory with temporary files
 temp/
+
+# MLFlow
+/mlruns/
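
Context for the new ignore entry: when no tracking server is configured, MLflow falls back to its local file store and writes every run under ./mlruns, so the directory is generated output rather than source. A minimal sketch of the logging calls that create it (experiment name, run name, and values are illustrative, not taken from this PR):

import mlflow

# With no MLFLOW_TRACKING_URI set, MLflow uses a local file store and
# creates ./mlruns in the working directory on the first run.
mlflow.set_experiment('mmdetection_pipeline')  # illustrative name
with mlflow.start_run(run_name='smoke_test'):  # illustrative name
    mlflow.log_param('config', 'configs/atss/atss_r50_fpn_1x_coco.py')
    mlflow.log_metric('bbox_mAP', 39.4)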
requirements.txt:

@@ -7,9 +7,11 @@ ipython==8.3.0
 joblib==1.2.0
 jupyter==1.0.0
 matplotlib==3.6.2
-numpy==1.24.1
+mlflow==2.2.1
+numpy==1.23.5
 omegaconf==2.3.0
 opencv-python==4.7.0.68
+openmim==0.3.6
 openpyxl==3.0.10
 pandas==1.5.2
 Pillow==9.4.0
@@ -21,5 +23,7 @@ scipy==1.10.0
 supervisely==6.69.2
 toml==0.10.2
 tomli==2.0.1
+torch==1.13.1
+torchvision==0.14.1
 tqdm==4.64.1
 xlsxwriter==3.0.3
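
The diff pins mlflow alongside openmim (OpenMMLab's package manager) and an explicit torch/torchvision pair, but the visible hunks do not show how MLflow is wired into training. One common route with this stack, sketched here purely as an assumption, is MMCV's built-in MlflowLoggerHook enabled from an mmdetection config (configs are plain Python; the experiment name below is illustrative):

# Hedged sketch, not taken from this PR's visible diff: mmcv's runner ships
# an MlflowLoggerHook that mirrors training logs to MLflow. In an
# mmdetection config it can be enabled via log_config:
log_config = dict(
    interval=50,  # log every 50 training iterations
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='MlflowLoggerHook',
            exp_name='mmdetection_pipeline',  # illustrative experiment name
            log_model=True),  # also upload the final checkpoint as an artifact
    ])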
src/models/mmdetection/.circleci/config.yml:

version: 2.1

# this allows you to use CircleCI's dynamic configuration feature
setup: true

# the path-filtering orb is required to continue a pipeline based on
# the path of an updated fileset
orbs:
  path-filtering: circleci/path-filtering@0.1.2

workflows:
  # the always-run workflow is always triggered, regardless of the pipeline parameters.
  always-run:
    jobs:
      # the path-filtering/filter job determines which pipeline
      # parameters to update.
      - path-filtering/filter:
          name: check-updated-files
          # 3-column, whitespace-delimited mapping. One mapping per
          # line:
          # <regex path-to-test> <parameter-to-set> <value-of-pipeline-parameter>
          mapping: |
            mmdet/.* lint_only false
            requirements/.* lint_only false
            tests/.* lint_only false
            tools/.* lint_only false
            configs/.* lint_only false
            .circleci/.* lint_only false
          base-revision: master
          # this is the path of the configuration we should trigger once
          # path filtering and pipeline parameter value updates are
          # complete. In this case, we are using the parent dynamic
          # configuration itself.
          config-path: .circleci/test.yml
src/models/mmdetection/.circleci/docker/Dockerfile:

ARG PYTORCH="1.8.1"
ARG CUDA="10.2"
ARG CUDNN="7"

FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel

# To fix GPG key error when running apt-get update
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub

RUN apt-get update && apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx git
src/models/mmdetection/.circleci/scripts/get_mmcv_var.sh:

#!/bin/bash

TORCH=$1
CUDA=$2

# 10.2 -> cu102
MMCV_CUDA="cu`echo ${CUDA} | tr -d '.'`"

# MMCV only provides pre-compiled packages for torch 1.x.0
# which works for any subversions of torch 1.x.
# We force the torch version to be 1.x.0 to ease package searching
# and avoid unnecessary rebuild during MMCV's installation.
TORCH_VER_ARR=(${TORCH//./ })
TORCH_VER_ARR[2]=0
printf -v MMCV_TORCH "%s." "${TORCH_VER_ARR[@]}"
MMCV_TORCH=${MMCV_TORCH%?}  # Remove the last dot

echo "export MMCV_CUDA=${MMCV_CUDA}" >> $BASH_ENV
echo "export MMCV_TORCH=${MMCV_TORCH}" >> $BASH_ENV
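
The same version munging, restated as a small Python sketch for readability (the CI runs the shell script above; the function name is mine):

def mmcv_env_vars(torch: str, cuda: str) -> tuple:
    """Mirror get_mmcv_var.sh: ('1.8.1', '10.2') -> ('1.8.0', 'cu102')."""
    # '10.2' -> 'cu102'
    mmcv_cuda = 'cu' + cuda.replace('.', '')
    # Force the torch patch version to 0: MMCV only ships wheels for
    # torch 1.x.0, and those work for any 1.x subversion.
    parts = torch.split('.')
    parts[2] = '0'
    return '.'.join(parts), mmcv_cuda

assert mmcv_env_vars('1.8.1', '10.2') == ('1.8.0', 'cu102')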
src/models/mmdetection/.circleci/test.yml:

version: 2.1

# the default pipeline parameters, which will be updated according to
# the results of the path-filtering orb
parameters:
  lint_only:
    type: boolean
    default: true

jobs:
  lint:
    docker:
      - image: cimg/python:3.7.4
    steps:
      - checkout
      - run:
          name: Install pre-commit hook
          command: |
            pip install pre-commit
            pre-commit install
      - run:
          name: Linting
          command: pre-commit run --all-files
      - run:
          name: Check docstring coverage
          command: |
            pip install interrogate
            interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 50 mmdet
  build_cpu:
    parameters:
      # The python version must match available image tags in
      # https://circleci.com/developer/images/image/cimg/python
      python:
        type: string
      torch:
        type: string
      torchvision:
        type: string
    docker:
      - image: cimg/python:<< parameters.python >>
    resource_class: large
    steps:
      - checkout
      - run:
          name: Get MMCV_TORCH as environment variables
          command: |
            . .circleci/scripts/get_mmcv_var.sh << parameters.torch >>
            source $BASH_ENV
      - run:
          name: Install Libraries
          command: |
            sudo apt-get update
            sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5 libgeos-dev cmake git
      - run:
          name: Configure Python & pip
          command: |
            python -m pip install --upgrade pip
            python -m pip install wheel
      - run:
          name: Install PyTorch
          command: |
            python -V
            python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
      - run:
          name: Install mmdet dependencies
          command: |
            python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch${MMCV_TORCH}/index.html
            python -m pip install -r requirements.txt
            python -m pip install albumentations --no-binary qudida,albumentations
            python -m pip install git+https://github.com/cocodataset/panopticapi.git
      - run:
          name: Build and install
          command: |
            python -m pip install -e .
      - run:
          name: Run unittests
          command: |
            python -m coverage run --branch --source mmdet -m pytest tests/
            python -m coverage xml
            python -m coverage report -m
  build_cuda:
    parameters:
      torch:
        type: string
      cuda:
        type: enum
        enum: ["10.1", "10.2", "11.1"]
      cudnn:
        type: integer
        default: 7
    machine:
      image: ubuntu-2004-cuda-11.4:202110-01
      docker_layer_caching: true
    resource_class: gpu.nvidia.small
    steps:
      - checkout
      - run:
          name: Get MMCV_TORCH and MMCV_CUDA as environment variables
          command: |
            . .circleci/scripts/get_mmcv_var.sh << parameters.torch >> << parameters.cuda >>
            source $BASH_ENV
      - run:
          name: Build Docker image
          command: |
            docker build .circleci/docker -t mmdet:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >>
            docker run --gpus all -t -d -v /home/circleci/project:/mmdet -w /mmdet --name mmdet mmdet:gpu
      - run:
          name: Install mmdet dependencies
          command: |
            docker exec mmdet pip install --upgrade pip
            docker exec mmdet pip install wheel
            docker exec mmdet pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/${MMCV_CUDA}/torch${MMCV_TORCH}/index.html
            docker exec mmdet pip install -r requirements.txt
            docker exec mmdet pip install typing-extensions -U
            docker exec mmdet pip install albumentations --use-pep517 qudida albumentations
            docker exec mmdet python -c 'import albumentations; print(albumentations.__version__)'
            docker exec mmdet pip install git+https://github.com/cocodataset/panopticapi.git
      - run:
          name: Build and install
          command: |
            docker exec mmdet pip install -e .
      - run:
          name: Run unittests
          command: |
            docker exec mmdet python -m pytest tests/

workflows:
  pr_stage_lint:
    when: << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - master
  pr_stage_test:
    when:
      not:
        << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - master
      - build_cpu:
          name: minimum_version_cpu
          torch: 1.6.0
          torchvision: 0.7.0
          python: 3.7.7
          requires:
            - lint
      - build_cpu:
          name: maximum_version_cpu
          torch: 1.9.0
          torchvision: 0.10.0
          python: 3.8.0
          requires:
            - minimum_version_cpu
      - hold:
          type: approval
          requires:
            - maximum_version_cpu
      - build_cuda:
          name: mainstream_version_gpu
          torch: 1.8.1
          # Use double quotation mark to explicitly specify its type
          # as string instead of number
          cuda: "10.2"
          requires:
            - hold
  merge_stage_test:
    when:
      not:
        << pipeline.parameters.lint_only >>
    jobs:
      - build_cuda:
          name: minimum_version_gpu
          torch: 1.6.0
          # Use double quotation mark to explicitly specify its type
          # as string instead of number
          cuda: "10.1"
          filters:
            branches:
              only:
                - master
src/models/mmdetection/.dev_scripts/batch_test_list.py:

# Copyright (c) OpenMMLab. All rights reserved.

# yapf: disable
atss = dict(
    config='configs/atss/atss_r50_fpn_1x_coco.py',
    checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',
    eval='bbox',
    metric=dict(bbox_mAP=39.4),
)
autoassign = dict(
    config='configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
    checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
carafe = dict(
    config='configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.6),
)
cascade_rcnn = [
    dict(
        config='configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py',
        checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',
        eval='bbox',
        metric=dict(bbox_mAP=40.3),
    ),
    dict(
        config='configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
        checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',
        eval=['bbox', 'segm'],
        metric=dict(bbox_mAP=41.2, segm_mAP=35.9),
    ),
]
cascade_rpn = dict(
    config='configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
    checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
centripetalnet = dict(
    config='configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py',  # noqa
    checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=44.7),
)
cornernet = dict(
    config='configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py',
    checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=41.2),
)
dcn = dict(
    config='configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',
    eval='bbox',
    metric=dict(bbox_mAP=41.3),
)
deformable_detr = dict(
    config='configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
    checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=44.5),
)
detectors = dict(
    config='configs/detectors/detectors_htc_r50_1x_coco.py',
    checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=49.1, segm_mAP=42.6),
)
detr = dict(
    config='configs/detr/detr_r50_8x2_150e_coco.py',
    checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.1),
)
double_heads = dict(
    config='configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
    checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.0),
)
dynamic_rcnn = dict(
    config='configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
    checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',
    eval='bbox',
    metric=dict(bbox_mAP=38.9),
)
empirical_attention = dict(
    config='configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py',  # noqa
    checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=40.0),
)
faster_rcnn = dict(
    config='configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.4),
)
fcos = dict(
    config='configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py',  # noqa
    checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.7),
)
foveabox = dict(
    config='configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
    checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.9),
)
free_anchor = dict(
    config='configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
    checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',
    eval='bbox',
    metric=dict(bbox_mAP=38.7),
)
fsaf = dict(
    config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',
    checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.4),
)
gcnet = dict(
    config='configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py',  # noqa
    checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth',  # noqa
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=40.4, segm_mAP=36.2),
)
gfl = dict(
    config='configs/gfl/gfl_r50_fpn_1x_coco.py',
    checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.2),
)
gn = dict(
    config='configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
    checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=40.1, segm_mAP=36.4),
)
gn_ws = dict(
    config='configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',
    eval='bbox',
    metric=dict(bbox_mAP=39.7),
)
grid_rcnn = dict(
    config='configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
    checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
groie = dict(
    config='configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.3),
)
guided_anchoring = [
    dict(
        config='configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py',  # noqa
        checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',
        eval='bbox',
        metric=dict(bbox_mAP=36.9),
    ),
    dict(
        config='configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
        checkpoint='ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth',  # noqa
        eval='bbox',
        metric=dict(bbox_mAP=39.6),
    ),
]
hrnet = dict(
    config='configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py',
    checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',
    eval='bbox',
    metric=dict(bbox_mAP=36.9),
)
htc = dict(
    config='configs/htc/htc_r50_fpn_1x_coco.py',
    checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=42.3, segm_mAP=37.4),
)
libra_rcnn = dict(
    config='configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
    checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',
    eval='bbox',
    metric=dict(bbox_mAP=38.3),
)
mask_rcnn = dict(
    config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
    checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
)
ms_rcnn = dict(
    config='configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
    checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=38.2, segm_mAP=36.0),
)
nas_fcos = dict(
    config='configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',  # noqa
    checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=39.4),
)
nas_fpn = dict(
    config='configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
    checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.5),
)
paa = dict(
    config='configs/paa/paa_r50_fpn_1x_coco.py',
    checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
pafpn = dict(
    config='configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
    checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=37.5),
)
pisa = dict(
    config='configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py',
    checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',
    eval='bbox',
    metric=dict(bbox_mAP=38.4),
)
point_rend = dict(
    config='configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
    checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=38.4, segm_mAP=36.3),
)
regnet = dict(
    config='configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
    checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth',  # noqa
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=40.4, segm_mAP=36.7),
)
reppoints = dict(
    config='configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py',
    checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.0),
)
res2net = dict(
    config='configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
    checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',
    eval='bbox',
    metric=dict(bbox_mAP=43.0),
)
resnest = dict(
    config='configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py',  # noqa
    checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=42.0),
)
retinanet = dict(
    config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
    checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',
    eval='bbox',
    metric=dict(bbox_mAP=36.5),
)
rpn = dict(
    config='configs/rpn/rpn_r50_fpn_1x_coco.py',
    checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',
    eval='proposal_fast',
    metric=dict(AR_1000=58.2),
)
sabl = [
    dict(
        config='configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
        checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',
        eval='bbox',
        metric=dict(bbox_mAP=37.7),
    ),
    dict(
        config='configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py',
        checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',
        eval='bbox',
        metric=dict(bbox_mAP=39.9),
    ),
]
scnet = dict(
    config='configs/scnet/scnet_r50_fpn_1x_coco.py',
    checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',
    eval='bbox',
    metric=dict(bbox_mAP=43.5),
)
sparse_rcnn = dict(
    config='configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
    checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.9),
)
ssd = [
    dict(
        config='configs/ssd/ssd300_coco.py',
        checkpoint='ssd300_coco_20210803_015428-d231a06e.pth',
        eval='bbox',
        metric=dict(bbox_mAP=25.5),
    ),
    dict(
        config='configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py',
        checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth',  # noqa
        eval='bbox',
        metric=dict(bbox_mAP=21.3),
    ),
]
tridentnet = dict(
    config='configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
    checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.6),
)
vfnet = dict(
    config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',
    checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',
    eval='bbox',
    metric=dict(bbox_mAP=41.6),
)
yolact = dict(
    config='configs/yolact/yolact_r50_1x8_coco.py',
    checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=31.2, segm_mAP=29.0),
)
yolo = dict(
    config='configs/yolo/yolov3_d53_320_273e_coco.py',
    checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',
    eval='bbox',
    metric=dict(bbox_mAP=27.9),
)
yolof = dict(
    config='configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
    checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',
    eval='bbox',
    metric=dict(bbox_mAP=37.5),
)
centernet = dict(
    config='configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
    checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=29.5),
)
yolox = dict(
    config='configs/yolox/yolox_tiny_8x8_300e_coco.py',
    checkpoint='yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth',  # noqa
    eval='bbox',
    metric=dict(bbox_mAP=31.5),
)
# yapf: enable
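
batch_test_list.py is not imported directly; the benchmark runners load it with mmcv's Config, which executes the file and exposes each top-level dict (or list of dicts) as a key. A condensed sketch of that consumption pattern, mirroring what benchmark_inference_fps.py does further below (the path argument is illustrative):

from mmcv import Config

# Config.fromfile() executes the registry file and yields its module-level
# names; each entry is either a single dict or a list of dicts.
registry = Config.fromfile('.dev_scripts/batch_test_list.py')
for model_key in registry:
    model_infos = registry[model_key]
    if not isinstance(model_infos, list):
        model_infos = [model_infos]
    for info in model_infos:
        print(info['config'], info['checkpoint'], info['metric'])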
src/models/mmdetection/.dev_scripts/batch_train_list.txt:

configs/atss/atss_r50_fpn_1x_coco.py
configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py
configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py
configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py
configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py
configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py
configs/detectors/detectors_htc_r50_1x_coco.py
configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py
configs/detr/detr_r50_8x2_150e_coco.py
configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py
configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py
configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py
configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py
configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py
configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py
configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py
configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py
configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py
configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py
configs/fsaf/fsaf_r50_fpn_1x_coco.py
configs/gfl/gfl_r50_fpn_1x_coco.py
configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py
configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py
configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py
configs/htc/htc_r50_fpn_1x_coco.py
configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py
configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py
configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py
configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py
configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
configs/paa/paa_r50_fpn_1x_coco.py
configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py
configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py
configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py
configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py
configs/rpn/rpn_r50_fpn_1x_coco.py
configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py
configs/ssd/ssd300_coco.py
configs/tridentnet/tridentnet_r50_caffe_1x_coco.py
configs/vfnet/vfnet_r50_fpn_1x_coco.py
configs/yolact/yolact_r50_8x8_coco.py
configs/yolo/yolov3_d53_320_273e_coco.py
configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py
configs/scnet/scnet_r50_fpn_1x_coco.py
configs/yolof/yolof_r50_c5_8x8_1x_coco.py
configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py
configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py
configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py
configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py
configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py
configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py
configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py
configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py
configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py
configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py
configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py
configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py
configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py
configs/centernet/centernet_resnet18_dcnv2_140e_coco.py
configs/yolox/yolox_tiny_8x8_300e_coco.py
configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py
configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py
src/models/mmdetection/.dev_scripts/benchmark_filter.py:

# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp


def parse_args():
    parser = argparse.ArgumentParser(description='Filter configs to train')
    parser.add_argument(
        '--basic-arch',
        action='store_true',
        help='to train models in basic arch')
    parser.add_argument(
        '--datasets', action='store_true', help='to train models in dataset')
    parser.add_argument(
        '--data-pipeline',
        action='store_true',
        help='to train models related to data pipeline, e.g. augmentations')
    parser.add_argument(
        '--nn-module',
        action='store_true',
        help='to train models related to neural network modules')
    parser.add_argument(
        '--model-options',
        nargs='+',
        help='custom options to special model benchmark')
    parser.add_argument(
        '--out',
        type=str,
        default='batch_train_list.txt',
        help='output path of gathered metrics to be stored')
    args = parser.parse_args()
    return args


basic_arch_root = [
    'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
    'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
    'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
    'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
    'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
    'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
    'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]

datasets_root = [
    'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]

data_pipeline_root = ['albu_example', 'instaboost']

nn_module_root = [
    'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
    'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]

benchmark_pool = [
    'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
    'configs/atss/atss_r50_fpn_1x_coco.py',
    'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
    'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
    'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
    'configs/centripetalnet/'
    'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
    'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
    'configs/cornernet/'
    'cornernet_hourglass104_mstest_8x6_210e_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
    'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
    'configs/detectors/detectors_htc_r50_1x_coco.py',
    'configs/detr/detr_r50_8x2_150e_coco.py',
    'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
    'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py',  # noqa
    'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
    'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
    'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py',
    'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',
    'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
    'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
    'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
    'configs/gfl/gfl_r50_fpn_1x_coco.py',
    'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
    'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
    'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
    'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
    'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
    'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
    'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
    'configs/htc/htc_r50_fpn_1x_coco.py',
    'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
    'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
    'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
    'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
    'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
    'configs/paa/paa_r50_fpn_1x_coco.py',
    'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
    'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
    'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
    'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
    'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
    'configs/resnest/'
    'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
    'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
    'configs/rpn/rpn_r50_fpn_1x_coco.py',
    'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
    'configs/ssd/ssd300_coco.py',
    'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
    'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
    'configs/yolact/yolact_r50_1x8_coco.py',
    'configs/yolo/yolov3_d53_320_273e_coco.py',
    'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
    'configs/scnet/scnet_r50_fpn_1x_coco.py',
    'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]


def main():
    args = parse_args()

    benchmark_type = []
    if args.basic_arch:
        benchmark_type += basic_arch_root
    if args.datasets:
        benchmark_type += datasets_root
    if args.data_pipeline:
        benchmark_type += data_pipeline_root
    if args.nn_module:
        benchmark_type += nn_module_root

    special_model = args.model_options
    if special_model is not None:
        benchmark_type += special_model

    config_dpath = 'configs/'
    benchmark_configs = []
    for cfg_root in benchmark_type:
        cfg_dir = osp.join(config_dpath, cfg_root)
        configs = os.scandir(cfg_dir)
        for cfg in configs:
            config_path = osp.join(cfg_dir, cfg.name)
            if (config_path in benchmark_pool
                    and config_path not in benchmark_configs):
                benchmark_configs.append(config_path)

    print(f'Totally found {len(benchmark_configs)} configs to benchmark')
    with open(args.out, 'w') as f:
        for config in benchmark_configs:
            f.write(config + '\n')


if __name__ == '__main__':
    main()
    1. # Copyright (c) OpenMMLab. All rights reserved.
    2. import argparse
    3. import os
    4. import os.path as osp
    5. import mmcv
    6. from mmcv import Config, DictAction
    7. from mmcv.runner import init_dist
    8. from terminaltables import GithubFlavoredMarkdownTable
    9. from tools.analysis_tools.benchmark import repeat_measure_inference_speed
    10. def parse_args():
    11. parser = argparse.ArgumentParser(
    12. description='MMDet benchmark a model of FPS')
    13. parser.add_argument('config', help='test config file path')
    14. parser.add_argument('checkpoint_root', help='Checkpoint file root path')
    15. parser.add_argument(
    16. '--round-num',
    17. type=int,
    18. default=1,
    19. help='round a number to a given precision in decimal digits')
    20. parser.add_argument(
    21. '--repeat-num',
    22. type=int,
    23. default=1,
    24. help='number of repeat times of measurement for averaging the results')
    25. parser.add_argument(
    26. '--out', type=str, help='output path of gathered fps to be stored')
    27. parser.add_argument(
    28. '--max-iter', type=int, default=2000, help='num of max iter')
    29. parser.add_argument(
    30. '--log-interval', type=int, default=50, help='interval of logging')
    31. parser.add_argument(
    32. '--fuse-conv-bn',
    33. action='store_true',
    34. help='Whether to fuse conv and bn, this will slightly increase'
    35. 'the inference speed')
    36. parser.add_argument(
    37. '--cfg-options',
    38. nargs='+',
    39. action=DictAction,
    40. help='override some settings in the used config, the key-value pair '
    41. 'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args


def results2markdown(result_dict):
    table_data = []
    is_multiple_results = False
    for cfg_name, value in result_dict.items():
        name = cfg_name.replace('configs/', '')
        fps = value['fps']
        ms_times_pre_image = value['ms_times_pre_image']
        if isinstance(fps, list):
            is_multiple_results = True
            mean_fps = value['mean_fps']
            mean_times_pre_image = value['mean_times_pre_image']
            fps_str = ','.join([str(s) for s in fps])
            ms_times_pre_image_str = ','.join(
                [str(s) for s in ms_times_pre_image])
            table_data.append([
                name, fps_str, mean_fps, ms_times_pre_image_str,
                mean_times_pre_image
            ])
        else:
            table_data.append([name, fps, ms_times_pre_image])

    if is_multiple_results:
        table_data.insert(0, [
            'model', 'fps', 'mean_fps', 'times_pre_image(ms)',
            'mean_times_pre_image(ms)'
        ])
    else:
        table_data.insert(0, ['model', 'fps', 'times_pre_image(ms)'])
    table = GithubFlavoredMarkdownTable(table_data)
    print(table.table, flush=True)


if __name__ == '__main__':
    args = parse_args()
    assert args.round_num >= 0
    assert args.repeat_num >= 1

    config = Config.fromfile(args.config)

    if args.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    else:
        init_dist(args.launcher)

    result_dict = {}
    for model_key in config:
        model_infos = config[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            record_metrics = model_info['metric']
            cfg_path = model_info['config'].strip()
            cfg = Config.fromfile(cfg_path)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                fps = repeat_measure_inference_speed(cfg, checkpoint,
                                                     args.max_iter,
                                                     args.log_interval,
                                                     args.fuse_conv_bn,
                                                     args.repeat_num)
                if args.repeat_num > 1:
                    fps_list = [round(fps_, args.round_num) for fps_ in fps]
                    times_pre_image_list = [
                        round(1000 / fps_, args.round_num) for fps_ in fps
                    ]
                    mean_fps = round(
                        sum(fps_list) / len(fps_list), args.round_num)
                    mean_times_pre_image = round(
                        sum(times_pre_image_list) / len(times_pre_image_list),
                        args.round_num)
                    print(
                        f'{cfg_path} '
                        f'Overall fps: {fps_list}[{mean_fps}] img / s, '
                        f'times per image: '
                        f'{times_pre_image_list}[{mean_times_pre_image}] '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=fps_list,
                        mean_fps=mean_fps,
                        ms_times_pre_image=times_pre_image_list,
                        mean_times_pre_image=mean_times_pre_image)
                else:
                    print(
                        f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
                        f'times per image: {1000 / fps:.{args.round_num}f} '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=round(fps, args.round_num),
                        ms_times_pre_image=round(1000 / fps, args.round_num))
            except Exception as e:
                print(f'{cfg_path} error: {repr(e)}')
                if args.repeat_num > 1:
                    result_dict[cfg_path] = dict(
                        fps=[0],
                        mean_fps=0,
                        ms_times_pre_image=[0],
                        mean_times_pre_image=0)
                else:
                    result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)

    if args.out:
        mmcv.mkdir_or_exist(args.out)
        mmcv.dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
    results2markdown(result_dict)
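Since a 'none' launcher is rejected outright, even a single-GPU measurement has to go through a distributed launcher. A minimal invocation sketch (paths are illustrative, and the torch.distributed.launch wrapper is the usual mmdet pattern rather than something this script mandates):

    python -m torch.distributed.launch --nproc_per_node=1 \
        .dev_scripts/benchmark_inference_fps.py \
        .dev_scripts/batch_test_list.py /path/to/checkpoints \
        --launcher pytorch --out work_dirs/fps

Per-model FPS is printed as a GitHub-flavored markdown table and, with --out, also dumped to batch_inference_fps.json.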
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser

from mmcv import Config

from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.utils import get_root_logger


def parse_args():
    parser = ArgumentParser()
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint_root', help='Checkpoint file root path')
    parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
    parser.add_argument('--aug', action='store_true', help='aug test')
    parser.add_argument('--model-name', help='model name to inference')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=1,
        help='the interval of show (s), 0 is block')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='bbox score threshold')
    args = parser.parse_args()
    return args


def inference_model(config_name, checkpoint, args, logger=None):
    cfg = Config.fromfile(config_name)
    if args.aug:
        if 'flip' in cfg.data.test.pipeline[1]:
            cfg.data.test.pipeline[1].flip = True
        else:
            if logger is not None:
                logger.error(f'{config_name}: unable to start aug test')
            else:
                print(f'{config_name}: unable to start aug test', flush=True)

    model = init_detector(cfg, checkpoint, device=args.device)
    # test a single image
    result = inference_detector(model, args.img)

    # show the results
    if args.show:
        show_result_pyplot(
            model,
            args.img,
            result,
            score_thr=args.score_thr,
            wait_time=args.wait_time)
    return result


# Sample test whether the inference code is correct
def main(args):
    config = Config.fromfile(args.config)

    # test a single model
    if args.model_name:
        if args.model_name in config:
            model_infos = config[args.model_name]
            if not isinstance(model_infos, list):
                model_infos = [model_infos]
            model_info = model_infos[0]
            config_name = model_info['config'].strip()
            print(f'processing: {config_name}', flush=True)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            # build the model from a config file and a checkpoint file
            inference_model(config_name, checkpoint, args)
            return
        else:
            raise RuntimeError('model name input error.')

    # test all models
    logger = get_root_logger(
        log_file='benchmark_test_image.log', log_level=logging.ERROR)

    for model_key in config:
        model_infos = config[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'], flush=True)
            config_name = model_info['config'].strip()
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                # build the model from a config file and a checkpoint file
                inference_model(config_name, checkpoint, args, logger)
            except Exception as e:
                logger.error(f'{config_name}: {repr(e)}')


if __name__ == '__main__':
    args = parse_args()
    main(args)
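To smoke-test a single entry instead of the whole list, --model-name picks one key out of the batch-test config; a sketch (the model key and paths are illustrative):

    python .dev_scripts/benchmark_test_image.py \
        .dev_scripts/batch_test_list.py /path/to/checkpoints \
        --model-name atss --img demo/demo.jpg --show

Without --model-name the script sweeps every model in the list and logs failures to benchmark_test_image.log instead of aborting.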
# Modified from:
# https://github.com/allenai/allennlp/blob/main/scripts/check_links.py
import argparse
import logging
import os
import pathlib
import re
import sys
from multiprocessing.dummy import Pool
from typing import NamedTuple, Optional, Tuple

import requests
from mmcv.utils import get_logger


def parse_args():
    parser = argparse.ArgumentParser(
        description='Goes through all the inline-links '
        'in markdown files and reports the breakages')
    parser.add_argument(
        '--num-threads',
        type=int,
        default=100,
        help='Number of processes to confirm the link')
    parser.add_argument('--https-proxy', type=str, help='https proxy')
    parser.add_argument(
        '--out',
        type=str,
        default='link_reports.txt',
        help='output path of reports')
    args = parser.parse_args()
    return args


OK_STATUS_CODES = (
    200,
    401,  # the resource exists but may require some sort of login.
    403,  # ^ same
    405,  # HEAD method not allowed.
    # the resource exists, but our default 'Accept-' header may not
    # match what the server can provide.
    406,
)


class MatchTuple(NamedTuple):
    source: str
    name: str
    link: str


def check_link(
        match_tuple: MatchTuple,
        http_session: requests.Session,
        logger: Optional[logging.Logger] = None
) -> Tuple[MatchTuple, bool, Optional[str]]:
    reason: Optional[str] = None
    if match_tuple.link.startswith('http'):
        result_ok, reason = check_url(match_tuple, http_session)
    else:
        result_ok = check_path(match_tuple)
    if logger is None:
        print(f"  {'✓' if result_ok else '✗'} {match_tuple.link}")
    else:
        logger.info(f"  {'✓' if result_ok else '✗'} {match_tuple.link}")
    return match_tuple, result_ok, reason


def check_url(match_tuple: MatchTuple,
              http_session: requests.Session) -> Tuple[bool, str]:
    """Check if a URL is reachable."""
    try:
        result = http_session.head(
            match_tuple.link, timeout=5, allow_redirects=True)
        return (
            result.ok or result.status_code in OK_STATUS_CODES,
            f'status code = {result.status_code}',
        )
    except (requests.ConnectionError, requests.Timeout):
        return False, 'connection error'


def check_path(match_tuple: MatchTuple) -> bool:
    """Check if a file in this repository exists."""
    relative_path = match_tuple.link.split('#')[0]
    full_path = os.path.join(
        os.path.dirname(str(match_tuple.source)), relative_path)
    return os.path.exists(full_path)


def main():
    args = parse_args()

    # setup logger
    logger = get_logger(name='mmdet', log_file=args.out)

    # setup https_proxy
    if args.https_proxy:
        os.environ['https_proxy'] = args.https_proxy

    # setup http_session
    http_session = requests.Session()
    for resource_prefix in ('http://', 'https://'):
        http_session.mount(
            resource_prefix,
            requests.adapters.HTTPAdapter(
                max_retries=5,
                pool_connections=20,
                pool_maxsize=args.num_threads),
        )

    logger.info('Finding all markdown files in the current directory...')
    project_root = (pathlib.Path(__file__).parent / '..').resolve()
    markdown_files = project_root.glob('**/*.md')

    all_matches = set()
    url_regex = re.compile(r'\[([^!][^\]]+)\]\(([^)(]+)\)')
    for markdown_file in markdown_files:
        with open(markdown_file) as handle:
            for line in handle.readlines():
                matches = url_regex.findall(line)
                for name, link in matches:
                    if 'localhost' not in link:
                        all_matches.add(
                            MatchTuple(
                                source=str(markdown_file),
                                name=name,
                                link=link))

    logger.info(f'  {len(all_matches)} links found')

    logger.info('Checking to make sure we can retrieve each link...')
    with Pool(processes=args.num_threads) as pool:
        results = pool.starmap(check_link, [(match, http_session, logger)
                                            for match in list(all_matches)])

    # collect unreachable results
    unreachable_results = [(match_tuple, reason)
                           for match_tuple, success, reason in results
                           if not success]

    if unreachable_results:
        logger.info('================================================')
        logger.info(f'Unreachable links ({len(unreachable_results)}):')
        for match_tuple, reason in unreachable_results:
            logger.info('  > Source: ' + match_tuple.source)
            logger.info('    Name: ' + match_tuple.name)
            logger.info('    Link: ' + match_tuple.link)
            if reason is not None:
                logger.info('    Reason: ' + reason)
        sys.exit(1)
    logger.info('No unreachable links found.')


if __name__ == '__main__':
    main()
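The project root is resolved relative to the script itself, so it can be run from anywhere in the checkout. A sketch:

    python .dev_scripts/check_links.py --num-threads 50 --out link_reports.txt

Because it exits with status 1 when any link is unreachable, the script can double as a CI gate.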
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp

from mmcv import Config


def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert benchmark model list to script')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--port', type=int, default=29666, help='dist port')
    parser.add_argument(
        '--work-dir',
        default='tools/batch_test',
        help='the dir to save metric')
    parser.add_argument(
        '--run', action='store_true', help='run script directly')
    parser.add_argument(
        '--out', type=str, help='path to save model benchmark script')

    args = parser.parse_args()
    return args


def process_model_info(model_info, work_dir):
    config = model_info['config'].strip()
    fname, _ = osp.splitext(osp.basename(config))
    job_name = fname
    work_dir = osp.join(work_dir, fname)
    checkpoint = model_info['checkpoint'].strip()
    if not isinstance(model_info['eval'], list):
        evals = [model_info['eval']]
    else:
        evals = model_info['eval']
    eval = ' '.join(evals)
    return dict(
        config=config,
        job_name=job_name,
        work_dir=work_dir,
        checkpoint=checkpoint,
        eval=eval)


def create_test_bash_info(commands, model_test_dict, port, script_name,
                          partition):
    config = model_test_dict['config']
    job_name = model_test_dict['job_name']
    checkpoint = model_test_dict['checkpoint']
    work_dir = model_test_dict['work_dir']
    eval = model_test_dict['eval']

    echo_info = f' \necho \'{config}\' &'
    commands.append(echo_info)
    commands.append('\n')

    command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
                   f'CPUS_PER_TASK=2 {script_name} '
    command_info += f'{partition} '
    command_info += f'{job_name} '
    command_info += f'{config} '
    command_info += f'$CHECKPOINT_DIR/{checkpoint} '
    command_info += f'--work-dir {work_dir} '
    command_info += f'--eval {eval} '
    command_info += f'--cfg-option dist_params.port={port} '
    command_info += ' &'

    commands.append(command_info)


def main():
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
    assert args.out or args.run, \
        ('Please specify at least one operation (save/run the '
         'script) with the argument "--out" or "--run"')

    commands = []
    partition_name = 'PARTITION=$1 '
    commands.append(partition_name)
    commands.append('\n')

    checkpoint_root = 'CHECKPOINT_DIR=$2 '
    commands.append(checkpoint_root)
    commands.append('\n')

    script_name = osp.join('tools', 'slurm_test.sh')
    port = args.port
    work_dir = args.work_dir

    cfg = Config.fromfile(args.config)

    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'])
            model_test_dict = process_model_info(model_info, work_dir)
            create_test_bash_info(commands, model_test_dict, port,
                                  script_name, '$PARTITION')
            port += 1

    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str)
    if args.run:
        os.system(command_str)


if __name__ == '__main__':
    main()
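A sketch of the two-step flow; the generated script reads the Slurm partition and checkpoint root from its first two positional arguments (partition name and paths are illustrative):

    python .dev_scripts/convert_test_benchmark_script.py \
        .dev_scripts/batch_test_list.py --out test_benchmark.sh
    bash test_benchmark.sh my_partition /path/to/checkpoints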
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp


def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert benchmark model list to script')
    parser.add_argument(
        'txt_path', type=str, help='txt path output by benchmark_filter')
    parser.add_argument(
        '--partition',
        type=str,
        default='openmmlab',
        help='slurm partition name')
    parser.add_argument(
        '--max-keep-ckpts',
        type=int,
        default=1,
        help='The maximum checkpoints to keep')
    parser.add_argument(
        '--run', action='store_true', help='run script directly')
    parser.add_argument(
        '--out', type=str, help='path to save model benchmark script')

    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
    assert args.out or args.run, \
        ('Please specify at least one operation (save/run the '
         'script) with the argument "--out" or "--run"')

    partition = args.partition  # cluster name
    root_name = './tools'
    train_script_name = osp.join(root_name, 'slurm_train.sh')
    # suppress stdout
    stdout_cfg = '>/dev/null'

    max_keep_ckpts = args.max_keep_ckpts

    commands = []
    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        for i, cfg in enumerate(model_cfgs):
            cfg = cfg.strip()
            if len(cfg) == 0:
                continue
            # print cfg name
            echo_info = f'echo \'{cfg}\' &'
            commands.append(echo_info)
            commands.append('\n')

            fname, _ = osp.splitext(osp.basename(cfg))
            out_fname = osp.join(root_name, 'work_dir', fname)
            # default setting
            if cfg.find('16x') >= 0:
                command_info = f'GPUS=16 GPUS_PER_NODE=8 ' \
                               f'CPUS_PER_TASK=2 {train_script_name} '
            elif cfg.find('gn-head_4x4_1x_coco.py') >= 0 or \
                    cfg.find('gn-head_4x4_2x_coco.py') >= 0:
                command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \
                               f'CPUS_PER_TASK=2 {train_script_name} '
            else:
                command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
                               f'CPUS_PER_TASK=2 {train_script_name} '
            command_info += f'{partition} '
            command_info += f'{fname} '
            command_info += f'{cfg} '
            command_info += f'{out_fname} '
            if max_keep_ckpts:
                command_info += f'--cfg-options ' \
                                f'checkpoint_config.max_keep_ckpts=' \
                                f'{max_keep_ckpts}' + ' '
            command_info += f'{stdout_cfg} &'

            commands.append(command_info)

            if i < len(model_cfgs):
                commands.append('\n')

    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str)
    if args.run:
        os.system(command_str)


if __name__ == '__main__':
    main()
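The training counterpart takes the filtered txt list instead of the Python config; a sketch (partition name is illustrative):

    python .dev_scripts/convert_train_benchmark_script.py \
        .dev_scripts/batch_train_list.txt \
        --partition my_partition --max-keep-ckpts 1 --out train_benchmark.sh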
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import json
import os.path as osp
import shutil
import subprocess
from collections import OrderedDict

import mmcv
import torch
import yaml


def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):

    class OrderedDumper(Dumper):
        pass

    def _dict_representer(dumper, data):
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())

    OrderedDumper.add_representer(OrderedDict, _dict_representer)
    return yaml.dump(data, stream, OrderedDumper, **kwds)


def process_checkpoint(in_file, out_file):
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']

    # remove ema state_dict
    for key in list(checkpoint['state_dict']):
        if key.startswith('ema_'):
            checkpoint['state_dict'].pop(key)

    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    if torch.__version__ >= '1.6':
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8])
    subprocess.Popen(['mv', out_file, final_file])
    return final_file


def is_by_epoch(config):
    cfg = mmcv.Config.fromfile('./configs/' + config)
    return cfg.runner.type == 'EpochBasedRunner'


def get_final_epoch_or_iter(config):
    cfg = mmcv.Config.fromfile('./configs/' + config)
    if cfg.runner.type == 'EpochBasedRunner':
        return cfg.runner.max_epochs
    else:
        return cfg.runner.max_iters


def get_best_epoch_or_iter(exp_dir):
    best_epoch_iter_full_path = list(
        sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1]
    best_epoch_or_iter_model_path = best_epoch_iter_full_path.split('/')[-1]
    best_epoch_or_iter = best_epoch_or_iter_model_path.\
        split('_')[-1].split('.')[0]
    return best_epoch_or_iter_model_path, int(best_epoch_or_iter)


def get_real_epoch_or_iter(config):
    cfg = mmcv.Config.fromfile('./configs/' + config)
    if cfg.runner.type == 'EpochBasedRunner':
        epoch = cfg.runner.max_epochs
        if cfg.data.train.type == 'RepeatDataset':
            epoch *= cfg.data.train.times
        return epoch
    else:
        return cfg.runner.max_iters


def get_final_results(log_json_path,
                      epoch_or_iter,
                      results_lut,
                      by_epoch=True):
    result_dict = dict()
    last_val_line = None
    last_train_line = None
    last_val_line_idx = -1
    last_train_line_idx = -1
    with open(log_json_path, 'r') as f:
        for i, line in enumerate(f.readlines()):
            log_line = json.loads(line)
            if 'mode' not in log_line.keys():
                continue

            if by_epoch:
                if (log_line['mode'] == 'train'
                        and log_line['epoch'] == epoch_or_iter):
                    result_dict['memory'] = log_line['memory']

                if (log_line['mode'] == 'val'
                        and log_line['epoch'] == epoch_or_iter):
                    result_dict.update({
                        key: log_line[key]
                        for key in results_lut if key in log_line
                    })
                    return result_dict
            else:
                if log_line['mode'] == 'train':
                    last_train_line_idx = i
                    last_train_line = log_line

                if log_line and log_line['mode'] == 'val':
                    last_val_line_idx = i
                    last_val_line = log_line

    # bug: max_iters = 768, last_train_line['iter'] = 750
    assert last_val_line_idx == last_train_line_idx + 1, \
        'Log file is incomplete'
    result_dict['memory'] = last_train_line['memory']
    result_dict.update({
        key: last_val_line[key]
        for key in results_lut if key in last_val_line
    })

    return result_dict


def get_dataset_name(config):
    # If there are more datasets, add them here.
    name_map = dict(
        CityscapesDataset='Cityscapes',
        CocoDataset='COCO',
        CocoPanopticDataset='COCO',
        DeepFashionDataset='Deep Fashion',
        LVISV05Dataset='LVIS v0.5',
        LVISV1Dataset='LVIS v1',
        VOCDataset='Pascal VOC',
        WIDERFaceDataset='WIDER Face',
        OpenImagesDataset='OpenImagesDataset',
        OpenImagesChallengeDataset='OpenImagesChallengeDataset',
        Objects365V1Dataset='Objects365 v1',
        Objects365V2Dataset='Objects365 v2')
    cfg = mmcv.Config.fromfile('./configs/' + config)
    return name_map[cfg.dataset_type]


def convert_model_info_to_pwc(model_infos):
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[-2]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])

        # get metadata
        memory = round(model['results']['memory'] / 1024, 1)
        meta_data = OrderedDict()
        meta_data['Training Memory (GB)'] = memory
        if 'epochs' in model:
            meta_data['Epochs'] = get_real_epoch_or_iter(model['config'])
        else:
            meta_data['Iterations'] = get_real_epoch_or_iter(model['config'])
        pwc_model_info['Metadata'] = meta_data

        # get dataset name
        dataset_name = get_dataset_name(model['config'])

        # get results
        results = []
        # if there are more metrics, add here.
        if 'bbox_mAP' in model['results']:
            metric = round(model['results']['bbox_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        if 'segm_mAP' in model['results']:
            metric = round(model['results']['segm_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Instance Segmentation',
                    Dataset=dataset_name,
                    Metrics={'mask AP': metric}))
        if 'PQ' in model['results']:
            metric = round(model['results']['PQ'], 1)
            results.append(
                OrderedDict(
                    Task='Panoptic Segmentation',
                    Dataset=dataset_name,
                    Metrics={'PQ': metric}))
        pwc_model_info['Results'] = results

        link_string = 'https://download.openmmlab.com/mmdetection/v2.0/'
        link_string += '{}/{}'.format(model['config'].rstrip('.py'),
                                      osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string
        if cfg_folder_name in pwc_files:
            pwc_files[cfg_folder_name].append(pwc_model_info)
        else:
            pwc_files[cfg_folder_name] = [pwc_model_info]
    return pwc_files


def parse_args():
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'out', type=str, help='output path of gathered models to be stored')
    parser.add_argument(
        '--best',
        action='store_true',
        help='whether to gather the best model.')

    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mmcv.mkdir_or_exist(models_out)

    # find all models in the root directory to be gathered
    raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))

    # filter out configs that were not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')

    # find final_ckpt and log file for each trained config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        by_epoch = is_by_epoch(used_config)
        # check whether the experiment is finished
        if args.best is True:
            final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir)
        else:
            final_epoch_or_iter = get_final_epoch_or_iter(used_config)
            final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',
                                             final_epoch_or_iter)

        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue

        # get the latest logs
        log_json_path = list(
            sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
        log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
        cfg = mmcv.Config.fromfile('./configs/' + used_config)
        results_lut = cfg.evaluation.metric
        if not isinstance(results_lut, list):
            results_lut = [results_lut]
        # case when using VOC, the evaluation key is only 'mAP'
        # when using Panoptic Dataset, the evaluation key is 'PQ'.
        for i, key in enumerate(results_lut):
            if 'mAP' not in key and 'PQ' not in key:
                results_lut[i] = key + '_mAP'
        model_performance = get_final_results(log_json_path,
                                              final_epoch_or_iter,
                                              results_lut, by_epoch)

        if model_performance is None:
            continue

        model_time = osp.split(log_txt_path)[-1].split('.')[0]
        model_info = dict(
            config=used_config,
            results=model_performance,
            model_time=model_time,
            final_model=final_model,
            log_json_path=osp.split(log_json_path)[-1])
        model_info['epochs' if by_epoch else 'iterations'] = \
            final_epoch_or_iter
        model_infos.append(model_info)

    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        model_publish_dir = osp.join(models_out,
                                     model['config'].rstrip('.py'))
        mmcv.mkdir_or_exist(model_publish_dir)

        model_name = osp.split(model['config'])[-1].split('.')[0]

        model_name += '_' + model['model_time']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      model['final_model'])

        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)

        # copy log
        shutil.copy(
            osp.join(models_root, model['config'], model['log_json_path']),
            osp.join(model_publish_dir, f'{model_name}.log.json'))
        shutil.copy(
            osp.join(models_root, model['config'],
                     model['log_json_path'].rstrip('.json')),
            osp.join(model_publish_dir, f'{model_name}.log'))

        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_config_path = osp.split(config_path)[-1]
        shutil.copy(config_path, osp.join(model_publish_dir,
                                          target_config_path))

        model['model_path'] = final_model_path
        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    mmcv.dump(models, osp.join(models_out, 'model_info.json'))

    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')


if __name__ == '__main__':
    main()
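A sketch of a typical gather, assuming experiments live under a root that mirrors the configs/ layout (paths are illustrative):

    python .dev_scripts/gather_models.py work_dirs/ gathered_models/ --best

Alongside the stripped, sha256-suffixed checkpoints, the output directory receives model_info.json plus one <folder>_metafile.yml per config folder.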
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp

import mmcv
from mmcv import Config


def parse_args():
    parser = argparse.ArgumentParser(
        description='Gather benchmarked models metric')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        '--out', type=str, help='output path of gathered metrics to be stored')
    parser.add_argument(
        '--not-show', action='store_true', help='not show metrics')
    parser.add_argument(
        '--show-all', action='store_true', help='show all model metrics')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    root_path = args.root
    metrics_out = args.out
    result_dict = {}

    cfg = Config.fromfile(args.config)

    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            record_metrics = model_info['metric']
            config = model_info['config'].strip()
            fname, _ = osp.splitext(osp.basename(config))
            metric_json_dir = osp.join(root_path, fname)
            if osp.exists(metric_json_dir):
                json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
                if len(json_list) > 0:
                    log_json_path = list(sorted(json_list))[-1]

                    metric = mmcv.load(log_json_path)
                    if config in metric.get('config', {}):

                        new_metrics = dict()
                        for record_metric_key in record_metrics:
                            record_metric_key_bk = record_metric_key
                            old_metric = record_metrics[record_metric_key]
                            if record_metric_key == 'AR_1000':
                                record_metric_key = 'AR@1000'
                            if record_metric_key not in metric['metric']:
                                raise KeyError(
                                    'record_metric_key does not exist, '
                                    'please check your config')
                            new_metric = round(
                                metric['metric'][record_metric_key] * 100, 1)
                            new_metrics[record_metric_key_bk] = new_metric

                        if args.show_all:
                            result_dict[config] = dict(
                                before=record_metrics, after=new_metrics)
                        else:
                            for record_metric_key in record_metrics:
                                old_metric = record_metrics[record_metric_key]
                                new_metric = new_metrics[record_metric_key]
                                if old_metric != new_metric:
                                    result_dict[config] = dict(
                                        before=record_metrics,
                                        after=new_metrics)
                                    break
                    else:
                        print(f'{config} not included in: {log_json_path}')
                else:
                    print(f'{config}: no json file found in '
                          f'{metric_json_dir}')
            else:
                print(f'{config}: directory does not exist: '
                      f'{metric_json_dir}')

    if metrics_out:
        mmcv.mkdir_or_exist(metrics_out)
        mmcv.dump(result_dict,
                  osp.join(metrics_out, 'batch_test_metric_info.json'))
    if not args.not_show:
        print('===================================')
        for config_name, metrics in result_dict.items():
            print(config_name, metrics)
        print('===================================')
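A sketch that diffs recorded metrics against a finished batch test; tools/batch_test matches the default work dir of the generated test script (paths are illustrative):

    python .dev_scripts/gather_test_benchmark_metric.py \
        .dev_scripts/batch_test_list.py tools/batch_test \
        --out metrics --show-all

Without --show-all, only models whose re-tested metric deviates from the recorded value make it into the report.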
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp

import mmcv
from gather_models import get_final_results

try:
    import xlrd
except ImportError:
    xlrd = None
try:
    import xlutils
    from xlutils.copy import copy
except ImportError:
    xlutils = None


def parse_args():
    parser = argparse.ArgumentParser(
        description='Gather benchmarked models metric')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'txt_path', type=str, help='txt path output by benchmark_filter')
    parser.add_argument(
        '--out', type=str, help='output path of gathered metrics to be stored')
    parser.add_argument(
        '--not-show', action='store_true', help='not show metrics')
    parser.add_argument(
        '--excel', type=str, help='input path of excel to be recorded')
    parser.add_argument(
        '--ncol', type=int, help='Number of column to be modified or appended')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    if args.excel:
        assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
                          'at the same time'
        if xlrd is None:
            raise RuntimeError(
                'xlrd is not installed, '
                'please use "pip install xlrd==1.2.0" to install')
        if xlutils is None:
            raise RuntimeError(
                'xlutils is not installed, '
                'please use "pip install xlutils==2.0.0" to install')
        readbook = xlrd.open_workbook(args.excel)
        sheet = readbook.sheet_by_name('Sheet1')
        sheet_info = {}
        total_nrows = sheet.nrows
        for i in range(3, sheet.nrows):
            sheet_info[sheet.row_values(i)[0]] = i
        xlrw = copy(readbook)
        table = xlrw.get_sheet(0)

    root_path = args.root
    metrics_out = args.out
    result_dict = {}

    with open(args.txt_path, 'r') as f:
        model_cfgs = f.readlines()
        for i, config in enumerate(model_cfgs):
            config = config.strip()
            if len(config) == 0:
                continue

            config_name = osp.split(config)[-1]
            config_name = osp.splitext(config_name)[0]
            result_path = osp.join(root_path, config_name)
            if osp.exists(result_path):
                # 1 read config
                cfg = mmcv.Config.fromfile(config)
                total_epochs = cfg.runner.max_epochs
                final_results = cfg.evaluation.metric
                if not isinstance(final_results, list):
                    final_results = [final_results]
                final_results_out = []
                for key in final_results:
                    if 'proposal_fast' in key:
                        final_results_out.append('AR@1000')  # RPN
                    elif 'mAP' not in key:
                        final_results_out.append(key + '_mAP')

                # 2 determine whether total_epochs ckpt exists
                ckpt_path = f'epoch_{total_epochs}.pth'
                if osp.exists(osp.join(result_path, ckpt_path)):
                    log_json_path = list(
                        sorted(glob.glob(osp.join(result_path,
                                                  '*.log.json'))))[-1]

                    # 3 read metric
                    model_performance = get_final_results(
                        log_json_path, total_epochs, final_results_out)
                    if model_performance is None:
                        print(f'log file error: {log_json_path}')
                        continue
                    for performance in model_performance:
                        if performance in ['AR@1000', 'bbox_mAP', 'segm_mAP']:
                            metric = round(
                                model_performance[performance] * 100, 1)
                            model_performance[performance] = metric
                    result_dict[config] = model_performance

                    # update and append excel content
                    if args.excel:
                        if 'AR@1000' in model_performance:
                            metrics = f'{model_performance["AR@1000"]}' \
                                      f'(AR@1000)'
                        elif 'segm_mAP' in model_performance:
                            metrics = f'{model_performance["bbox_mAP"]}/' \
                                      f'{model_performance["segm_mAP"]}'
                        else:
                            metrics = f'{model_performance["bbox_mAP"]}'

                        row_num = sheet_info.get(config, None)
                        if row_num:
                            table.write(row_num, args.ncol, metrics)
                        else:
                            table.write(total_nrows, 0, config)
                            table.write(total_nrows, args.ncol, metrics)
                            total_nrows += 1
                else:
                    print(f'{config}: checkpoint does not exist: {ckpt_path}')
            else:
                print(f'{config} does not exist')

    # 4 save or print results
    if metrics_out:
        mmcv.mkdir_or_exist(metrics_out)
        mmcv.dump(result_dict,
                  osp.join(metrics_out, 'model_metric_info.json'))
    if not args.not_show:
        print('===================================')
        for config_name, metrics in result_dict.items():
            print(config_name, metrics)
        print('===================================')
    if args.excel:
        filename, suffix = osp.splitext(args.excel)
        xlrw.save(f'{filename}_o{suffix}')
        print(f'>>> Output {filename}_o{suffix}')
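A sketch that also patches results into a spreadsheet; --excel and --ncol must be given together, and xlrd 1.2.0 / xlutils 2.0.0 must be installed (file names and the column index are illustrative):

    python .dev_scripts/gather_train_benchmark_metric.py \
        work_dirs .dev_scripts/batch_train_list.txt \
        --out metrics --excel report.xls --ncol 4

The updated workbook is saved next to the input as report_o.xls rather than modified in place.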
yapf -r -i mmdet/ configs/ tests/ tools/
isort -rc mmdet/ configs/ tests/ tools/
flake8 .
PARTITION=$1
CHECKPOINT_DIR=$2
echo 'configs/atss/atss_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py $CHECKPOINT_DIR/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth --work-dir tools/batch_test/atss_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29666 &
echo 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION autoassign_r50_fpn_8x2_1x_coco configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py $CHECKPOINT_DIR/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth --work-dir tools/batch_test/autoassign_r50_fpn_8x2_1x_coco --eval bbox --cfg-option dist_params.port=29667 &
echo 'configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_carafe_1x_coco configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_carafe_1x_coco --eval bbox --cfg-option dist_params.port=29668 &
echo 'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cascade_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth --work-dir tools/batch_test/cascade_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29669 &
echo 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cascade_mask_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth --work-dir tools/batch_test/cascade_mask_rcnn_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29670 &
echo 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION crpn_faster_rcnn_r50_caffe_fpn_1x_coco configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth --work-dir tools/batch_test/crpn_faster_rcnn_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29671 &
echo 'configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION centripetalnet_hourglass104_mstest_16x6_210e_coco configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py $CHECKPOINT_DIR/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth --work-dir tools/batch_test/centripetalnet_hourglass104_mstest_16x6_210e_coco --eval bbox --cfg-option dist_params.port=29672 &
echo 'configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION cornernet_hourglass104_mstest_8x6_210e_coco configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py $CHECKPOINT_DIR/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth --work-dir tools/batch_test/cornernet_hourglass104_mstest_8x6_210e_coco --eval bbox --cfg-option dist_params.port=29673 &
echo 'configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco --eval bbox --cfg-option dist_params.port=29674 &
echo 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deformable_detr_r50_16x2_50e_coco configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py $CHECKPOINT_DIR/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth --work-dir tools/batch_test/deformable_detr_r50_16x2_50e_coco --eval bbox --cfg-option dist_params.port=29675 &
echo 'configs/detectors/detectors_htc_r50_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION detectors_htc_r50_1x_coco configs/detectors/detectors_htc_r50_1x_coco.py $CHECKPOINT_DIR/detectors_htc_r50_1x_coco-329b1453.pth --work-dir tools/batch_test/detectors_htc_r50_1x_coco --eval bbox segm --cfg-option dist_params.port=29676 &
echo 'configs/detr/detr_r50_8x2_150e_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION detr_r50_8x2_150e_coco configs/detr/detr_r50_8x2_150e_coco.py $CHECKPOINT_DIR/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth --work-dir tools/batch_test/detr_r50_8x2_150e_coco --eval bbox --cfg-option dist_params.port=29677 &
echo 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION dh_faster_rcnn_r50_fpn_1x_coco configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth --work-dir tools/batch_test/dh_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29678 &
echo 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION dynamic_rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/dynamic_rcnn_r50_fpn_1x-62a3f276.pth --work-dir tools/batch_test/dynamic_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29679 &
echo 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_attention_1111_1x_coco configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_attention_1111_1x_coco --eval bbox --cfg-option dist_params.port=29680 &
echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29681 &
echo 'configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py $CHECKPOINT_DIR/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth --work-dir tools/batch_test/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco --eval bbox --cfg-option dist_params.port=29682 &
echo 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fovea_align_r50_fpn_gn-head_4x4_2x_coco configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py $CHECKPOINT_DIR/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth --work-dir tools/batch_test/fovea_align_r50_fpn_gn-head_4x4_2x_coco --eval bbox --cfg-option dist_params.port=29683 &
echo 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_free_anchor_r50_fpn_1x_coco configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth --work-dir tools/batch_test/retinanet_free_anchor_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29684 &
echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py $CHECKPOINT_DIR/fsaf_r50_fpn_1x_coco-94ccc51f.pth --work-dir tools/batch_test/fsaf_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29685 &
echo 'configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco --eval bbox segm --cfg-option dist_params.port=29686 &
echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py $CHECKPOINT_DIR/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth --work-dir tools/batch_test/gfl_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29687 &
echo 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_gn-all_2x_coco --eval bbox segm --cfg-option dist_params.port=29688 &
echo 'configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_gn_ws-all_1x_coco configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_gn_ws-all_1x_coco --eval bbox --cfg-option dist_params.port=29689 &
echo 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION grid_rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py $CHECKPOINT_DIR/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth --work-dir tools/batch_test/grid_rcnn_r50_fpn_gn-head_2x_coco --eval bbox --cfg-option dist_params.port=29690 &
echo 'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_fpn_groie_1x_coco configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth --work-dir tools/batch_test/faster_rcnn_r50_fpn_groie_1x_coco --eval bbox --cfg-option dist_params.port=29691 &
echo 'configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ga_retinanet_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth --work-dir tools/batch_test/ga_retinanet_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29692 &
echo 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ga_faster_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth --work-dir tools/batch_test/ga_faster_r50_caffe_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29693 &
echo 'configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_hrnetv2p_w18_1x_coco configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth --work-dir tools/batch_test/faster_rcnn_hrnetv2p_w18_1x_coco --eval bbox --cfg-option dist_params.port=29694 &
echo 'configs/htc/htc_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py $CHECKPOINT_DIR/htc_r50_fpn_1x_coco_20200317-7332cf16.pth --work-dir tools/batch_test/htc_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29695 &
echo 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION libra_faster_rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth --work-dir tools/batch_test/libra_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29696 &
echo 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_r50_fpn_1x_coco configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --work-dir tools/batch_test/mask_rcnn_r50_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29697 &
echo 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ms_rcnn_r50_caffe_fpn_1x_coco configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py $CHECKPOINT_DIR/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth --work-dir tools/batch_test/ms_rcnn_r50_caffe_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29698 &
echo 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py' &
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py $CHECKPOINT_DIR/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth --work-dir tools/batch_test/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco --eval bbox --cfg-option dist_params.port=29699 &
    70. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py $CHECKPOINT_DIR/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth --work-dir tools/batch_test/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco --eval bbox --cfg-option dist_params.port=29699 &
    71. echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' &
    72. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_r50_nasfpn_crop640_50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py $CHECKPOINT_DIR/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth --work-dir tools/batch_test/retinanet_r50_nasfpn_crop640_50e_coco --eval bbox --cfg-option dist_params.port=29700 &
    73. echo 'configs/paa/paa_r50_fpn_1x_coco.py' &
    74. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py $CHECKPOINT_DIR/paa_r50_fpn_1x_coco_20200821-936edec3.pth --work-dir tools/batch_test/paa_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29701 &
    75. echo 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py' &
    76. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r50_pafpn_1x_coco configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth --work-dir tools/batch_test/faster_rcnn_r50_pafpn_1x_coco --eval bbox --cfg-option dist_params.port=29702 &
    77. echo 'configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py' &
    78. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pisa_faster_rcnn_r50_fpn_1x_coco configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth --work-dir tools/batch_test/pisa_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29703 &
    79. echo 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py' &
    80. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION point_rend_r50_caffe_fpn_mstrain_1x_coco configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py $CHECKPOINT_DIR/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth --work-dir tools/batch_test/point_rend_r50_caffe_fpn_mstrain_1x_coco --eval bbox segm --cfg-option dist_params.port=29704 &
    81. echo 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' &
    82. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION mask_rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py $CHECKPOINT_DIR/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth --work-dir tools/batch_test/mask_rcnn_regnetx-3.2GF_fpn_1x_coco --eval bbox segm --cfg-option dist_params.port=29705 &
    83. echo 'configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py' &
    84. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION reppoints_moment_r50_fpn_1x_coco configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py $CHECKPOINT_DIR/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth --work-dir tools/batch_test/reppoints_moment_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29706 &
    85. echo 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py' &
    86. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_r2_101_fpn_2x_coco configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py $CHECKPOINT_DIR/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth --work-dir tools/batch_test/faster_rcnn_r2_101_fpn_2x_coco --eval bbox --cfg-option dist_params.port=29707 &
    87. echo 'configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py' &
    88. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py $CHECKPOINT_DIR/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth --work-dir tools/batch_test/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco --eval bbox --cfg-option dist_params.port=29708 &
    89. echo 'configs/retinanet/retinanet_r50_fpn_1x_coco.py' &
    90. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION retinanet_r50_fpn_1x_coco configs/retinanet/retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth --work-dir tools/batch_test/retinanet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29709 &
    91. echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' &
    92. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth --work-dir tools/batch_test/rpn_r50_fpn_1x_coco --eval proposal_fast --cfg-option dist_params.port=29710 &
    93. echo 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py' &
    94. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sabl_retinanet_r50_fpn_1x_coco configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth --work-dir tools/batch_test/sabl_retinanet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29711 &
    95. echo 'configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py' &
    96. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sabl_faster_rcnn_r50_fpn_1x_coco configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth --work-dir tools/batch_test/sabl_faster_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29712 &
    97. echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' &
    98. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/scnet_r50_fpn_1x_coco-c3f09857.pth --work-dir tools/batch_test/scnet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29713 &
    99. echo 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py' &
    100. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION sparse_rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py $CHECKPOINT_DIR/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth --work-dir tools/batch_test/sparse_rcnn_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29714 &
    101. echo 'configs/ssd/ssd300_coco.py' &
    102. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ssd300_coco configs/ssd/ssd300_coco.py $CHECKPOINT_DIR/ssd300_coco_20210803_015428-d231a06e.pth --work-dir tools/batch_test/ssd300_coco --eval bbox --cfg-option dist_params.port=29715 &
    103. echo 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py' &
    104. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION tridentnet_r50_caffe_1x_coco configs/tridentnet/tridentnet_r50_caffe_1x_coco.py $CHECKPOINT_DIR/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth --work-dir tools/batch_test/tridentnet_r50_caffe_1x_coco --eval bbox --cfg-option dist_params.port=29716 &
    105. echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &
    106. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py $CHECKPOINT_DIR/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth --work-dir tools/batch_test/vfnet_r50_fpn_1x_coco --eval bbox --cfg-option dist_params.port=29717 &
    107. echo 'configs/yolact/yolact_r50_1x8_coco.py' &
    108. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolact_r50_1x8_coco configs/yolact/yolact_r50_1x8_coco.py $CHECKPOINT_DIR/yolact_r50_1x8_coco_20200908-f38d58df.pth --work-dir tools/batch_test/yolact_r50_1x8_coco --eval bbox segm --cfg-option dist_params.port=29718 &
    109. echo 'configs/yolo/yolov3_d53_320_273e_coco.py' &
    110. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolov3_d53_320_273e_coco configs/yolo/yolov3_d53_320_273e_coco.py $CHECKPOINT_DIR/yolov3_d53_320_273e_coco-421362b6.pth --work-dir tools/batch_test/yolov3_d53_320_273e_coco --eval bbox --cfg-option dist_params.port=29719 &
    111. echo 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py' &
    112. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolof_r50_c5_8x8_1x_coco configs/yolof/yolof_r50_c5_8x8_1x_coco.py $CHECKPOINT_DIR/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth --work-dir tools/batch_test/yolof_r50_c5_8x8_1x_coco --eval bbox --cfg-option dist_params.port=29720 &
    113. echo 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py' &
    114. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION centernet_resnet18_dcnv2_140e_coco configs/centernet/centernet_resnet18_dcnv2_140e_coco.py $CHECKPOINT_DIR/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth --work-dir tools/batch_test/centernet_resnet18_dcnv2_140e_coco --eval bbox --cfg-option dist_params.port=29721 &
    115. echo 'configs/yolox/yolox_tiny_8x8_300e_coco.py' &
    116. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION yolox_tiny_8x8_300e_coco configs/yolox/yolox_tiny_8x8_300e_coco.py $CHECKPOINT_DIR/yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth --work-dir tools/batch_test/yolox_tiny_8x8_300e_coco --eval bbox --cfg-option dist_params.port=29722 &
    117. echo 'configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py' &
    118. GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION ssdlite_mobilenetv2_scratch_600e_coco configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py $CHECKPOINT_DIR/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth --work-dir tools/batch_test/ssdlite_mobilenetv2_scratch_600e_coco --eval bbox --cfg-option dist_params.port=29723 &
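    Each pair of lines above echoes a config path and then submits one background evaluation job through tools/slurm_test.sh, with a unique dist_params.port so the concurrent jobs do not collide. A minimal launch sketch, assuming a Slurm cluster and that the commands above are saved as test_benchmark.sh (the script name, partition name, and checkpoint directory below are placeholders, not values from this PR):

    export PARTITION=my_partition               # hypothetical Slurm partition name
    export CHECKPOINT_DIR=/path/to/checkpoints  # directory holding the .pth files referenced above
    bash test_benchmark.sh                      # submits every evaluation job in the background
    wait                                        # block until all submissions have returned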
    # Copyright (c) OpenMMLab. All rights reserved.
    """Check whether backbones successfully load their pretrained checkpoints."""
    import copy
    import os
    from os.path import dirname, exists, join

    import pytest
    from mmcv import Config, ProgressBar
    from mmcv.runner import _load_checkpoint

    from mmdet.models import build_detector


    def _get_config_directory():
        """Find the predefined detector config directory."""
        try:
            # Assume we are running in the source mmdetection repo
            repo_dpath = dirname(dirname(__file__))
        except NameError:
            # For IPython development when this __file__ is not defined
            import mmdet
            repo_dpath = dirname(dirname(mmdet.__file__))
        config_dpath = join(repo_dpath, 'configs')
        if not exists(config_dpath):
            raise Exception('Cannot find config path')
        return config_dpath


    def _get_config_module(fname):
        """Load a configuration as a python module."""
        config_dpath = _get_config_directory()
        config_fpath = join(config_dpath, fname)
        config_mod = Config.fromfile(config_fpath)
        return config_mod


    def _get_detector_cfg(fname):
        """Grab the configs necessary to create a detector.

        These are deep copied to allow for safe modification of parameters
        without influencing other tests.
        """
        config = _get_config_module(fname)
        model = copy.deepcopy(config.model)
        return model


    def _traversed_config_file():
        """Traverse all potential config files under the `configs` directory.
        If you need to print details or debug code, you can use this function.

        If `backbone.init_cfg` is None (i.e. the `Pretrained` init mode is not
        used), add the folder name to `ignores_folder` (if every config file in
        that folder sets backbone.init_cfg to None) or add the config name to
        `ignores_file` (if a single config file sets backbone.init_cfg to None).
        """
        config_path = _get_config_directory()
        check_cfg_names = []

        # `_base_`, `legacy_1.x` and `common` are ignored by default.
        ignores_folder = ['_base_', 'legacy_1.x', 'common']
        # 'ld' needs to load a teacher model; if you want to check 'ld',
        # please check the teacher_config path first.
        ignores_folder += ['ld']
        # `selfsup_pretrain` needs a converted model; if you want to check
        # this model, convert the model first.
        ignores_folder += ['selfsup_pretrain']
        # The `init_cfg` in 'centripetalnet', 'cornernet', 'cityscapes' and
        # 'scratch' is None.
        # The `init_cfg` in ssdlite (`ssdlite_mobilenetv2_scratch_600e_coco.py`)
        # is None.
        # Please confirm `backbone.init_cfg` is None first.
        ignores_folder += ['centripetalnet', 'cornernet', 'cityscapes', 'scratch']
        ignores_file = ['ssdlite_mobilenetv2_scratch_600e_coco.py']

        for config_file_name in os.listdir(config_path):
            if config_file_name not in ignores_folder:
                config_file = join(config_path, config_file_name)
                if os.path.isdir(config_file):
                    for config_sub_file in os.listdir(config_file):
                        if config_sub_file.endswith('py') and \
                                config_sub_file not in ignores_file:
                            name = join(config_file, config_sub_file)
                            check_cfg_names.append(name)
        return check_cfg_names


    def _check_backbone(config, print_cfg=True):
        """Check whether the backbone successfully loads the pretrained
        checkpoint, using `backbone.init_cfg`.

        First, use `mmcv.runner._load_checkpoint` to load the checkpoint
        without loading the model. Then, use `build_detector` to build the
        model and `model.init_weights()` to initialize its parameters.
        Finally, assert that the weights and biases of each layer loaded from
        the pretrained checkpoint equal those of the original checkpoint.

        Args:
            config (str): Config file path.
            print_cfg (bool): Whether to print the logs and return the result.

        Returns:
            results (str or None): If the backbone successfully loads the
                pretrained checkpoint, return None; else, return the config
                file path.
        """
        if print_cfg:
            print('-' * 15 + 'loading ', config)
        cfg = Config.fromfile(config)
        init_cfg = None
        try:
            init_cfg = cfg.model.backbone.init_cfg
            init_flag = True
        except AttributeError:
            init_flag = False
        if init_cfg is None or init_cfg.get('type') != 'Pretrained':
            init_flag = False
        if init_flag:
            checkpoint = _load_checkpoint(init_cfg.checkpoint)
            if 'state_dict' in checkpoint:
                state_dict = checkpoint['state_dict']
            else:
                state_dict = checkpoint
            model = build_detector(
                cfg.model,
                train_cfg=cfg.get('train_cfg'),
                test_cfg=cfg.get('test_cfg'))
            model.init_weights()
            checkpoint_layers = state_dict.keys()
            for name, value in model.backbone.state_dict().items():
                if name in checkpoint_layers:
                    assert value.equal(state_dict[name])
            if print_cfg:
                print('-' * 10 + 'Successfully load checkpoint' + '-' * 10 +
                      '\n')
            return None
        else:
            if print_cfg:
                print(config + '\n' + '-' * 10 +
                      'config file does not have init_cfg' + '-' * 10 + '\n')
            return config


    @pytest.mark.parametrize('config', _traversed_config_file())
    def test_load_pretrained(config):
        """Check whether the backbone successfully loads the pretrained model
        via `backbone.init_cfg`.

        For details, please refer to `_check_backbone`.
        """
        _check_backbone(config, print_cfg=False)


    def _test_load_pretrained():
        """Traverse all potential config files under the `configs` directory
        and print the config files whose backbone initialization from a
        pretrained checkpoint might be problematic and needs to be rechecked,
        including the config files whose backbone.init_cfg is None. Use this
        function if you need to print details or debug code.
        """
        check_cfg_names = _traversed_config_file()
        need_check_cfg = []

        prog_bar = ProgressBar(len(check_cfg_names))
        for config in check_cfg_names:
            init_cfg_name = _check_backbone(config)
            if init_cfg_name is not None:
                need_check_cfg.append(init_cfg_name)
            prog_bar.update()
        print('These config files need to be checked again:')
        print(need_check_cfg)
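    The parametrized test above generates one pytest case per traversed config file. A minimal sketch of running it, assuming the module is saved as test_init_backbone.py and that mmcv, mmdet, and network access to the pretrained checkpoints are available:

    # Run the backbone-loading check for every config (one case per config):
    pytest test_init_backbone.py -k test_load_pretrained

    # Or call the debug helper directly for verbose output with a progress bar:
    python -c 'from test_init_backbone import _test_load_pretrained; _test_load_pretrained()'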
    echo 'configs/atss/atss_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab atss_r50_fpn_1x_coco configs/atss/atss_r50_fpn_1x_coco.py ./tools/work_dir/atss_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab autoassign_r50_fpn_8x2_1x_coco configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py ./tools/work_dir/autoassign_r50_fpn_8x2_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab cascade_mask_rcnn_r50_fpn_1x_coco configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/cascade_mask_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab crpn_faster_rcnn_r50_caffe_fpn_1x_coco configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/crpn_faster_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab centernet_resnet18_dcnv2_140e_coco configs/centernet/centernet_resnet18_dcnv2_140e_coco.py ./tools/work_dir/centernet_resnet18_dcnv2_140e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py' &
    GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab centripetalnet_hourglass104_mstest_16x6_210e_coco configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py ./tools/work_dir/centripetalnet_hourglass104_mstest_16x6_210e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab cornernet_hourglass104_mstest_8x6_210e_coco configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py ./tools/work_dir/cornernet_hourglass104_mstest_8x6_210e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/detectors/detectors_htc_r50_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab detectors_htc_r50_1x_coco configs/detectors/detectors_htc_r50_1x_coco.py ./tools/work_dir/detectors_htc_r50_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py' &
    GPUS=16 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab deformable_detr_r50_16x2_50e_coco configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py ./tools/work_dir/deformable_detr_r50_16x2_50e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/detr/detr_r50_8x2_150e_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab detr_r50_8x2_150e_coco configs/detr/detr_r50_8x2_150e_coco.py ./tools/work_dir/detr_r50_8x2_150e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab dh_faster_rcnn_r50_fpn_1x_coco configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/dh_faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab dynamic_rcnn_r50_fpn_1x_coco configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/dynamic_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_dc5_mstrain_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_fpn_mstrain_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_caffe_fpn_1x_coco configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_ohem_1x_coco configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_ohem_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py' &
    GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab fovea_align_r50_fpn_gn-head_4x4_2x_coco configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py ./tools/work_dir/fovea_align_r50_fpn_gn-head_4x4_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_fp16_1x_coco configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_fp16_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_fpn_fp16_1x_coco configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py ./tools/work_dir/retinanet_r50_fpn_fp16_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_free_anchor_r50_fpn_1x_coco configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py ./tools/work_dir/retinanet_free_anchor_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/fsaf/fsaf_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab fsaf_r50_fpn_1x_coco configs/fsaf/fsaf_r50_fpn_1x_coco.py ./tools/work_dir/fsaf_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/gfl/gfl_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab gfl_r50_fpn_1x_coco configs/gfl/gfl_r50_fpn_1x_coco.py ./tools/work_dir/gfl_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_ghm_r50_fpn_1x_coco configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py ./tools/work_dir/retinanet_ghm_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab grid_rcnn_r50_fpn_gn-head_2x_coco configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py ./tools/work_dir/grid_rcnn_r50_fpn_gn-head_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ga_faster_r50_caffe_fpn_1x_coco configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py ./tools/work_dir/ga_faster_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/htc/htc_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab htc_r50_fpn_1x_coco configs/htc/htc_r50_fpn_1x_coco.py ./tools/work_dir/htc_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ld_r18_gflv1_r101_fpn_coco_1x configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py ./tools/work_dir/ld_r18_gflv1_r101_fpn_coco_1x --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab libra_faster_rcnn_r50_fpn_1x_coco configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/libra_faster_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py ./tools/work_dir/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ms_rcnn_r50_caffe_fpn_1x_coco configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py ./tools/work_dir/ms_rcnn_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py' &
    GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py ./tools/work_dir/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/paa/paa_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab paa_r50_fpn_1x_coco configs/paa/paa_r50_fpn_1x_coco.py ./tools/work_dir/paa_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab pisa_mask_rcnn_r50_fpn_1x_coco configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/pisa_mask_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab point_rend_r50_caffe_fpn_mstrain_1x_coco configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py ./tools/work_dir/point_rend_r50_caffe_fpn_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab reppoints_moment_r50_fpn_gn-neck+head_1x_coco configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py ./tools/work_dir/reppoints_moment_r50_fpn_gn-neck+head_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_caffe_fpn_1x_coco configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py ./tools/work_dir/retinanet_r50_caffe_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/rpn/rpn_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab rpn_r50_fpn_1x_coco configs/rpn/rpn_r50_fpn_1x_coco.py ./tools/work_dir/rpn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab sabl_retinanet_r50_fpn_1x_coco configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py ./tools/work_dir/sabl_retinanet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/ssd/ssd300_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ssd300_coco configs/ssd/ssd300_coco.py ./tools/work_dir/ssd300_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab tridentnet_r50_caffe_1x_coco configs/tridentnet/tridentnet_r50_caffe_1x_coco.py ./tools/work_dir/tridentnet_r50_caffe_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/vfnet/vfnet_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab vfnet_r50_fpn_1x_coco configs/vfnet/vfnet_r50_fpn_1x_coco.py ./tools/work_dir/vfnet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/yolact/yolact_r50_8x8_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolact_r50_8x8_coco configs/yolact/yolact_r50_8x8_coco.py ./tools/work_dir/yolact_r50_8x8_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/yolo/yolov3_d53_320_273e_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolov3_d53_320_273e_coco configs/yolo/yolov3_d53_320_273e_coco.py ./tools/work_dir/yolov3_d53_320_273e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab sparse_rcnn_r50_fpn_1x_coco configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py ./tools/work_dir/sparse_rcnn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/scnet/scnet_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab scnet_r50_fpn_1x_coco configs/scnet/scnet_r50_fpn_1x_coco.py ./tools/work_dir/scnet_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/yolof/yolof_r50_c5_8x8_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolof_r50_c5_8x8_1x_coco configs/yolof/yolof_r50_c5_8x8_1x_coco.py ./tools/work_dir/yolof_r50_c5_8x8_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_carafe_1x_coco configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_carafe_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_mdpool_1x_coco configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_mdpool_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_dpool_1x_coco configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_dpool_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_gn-all_2x_coco configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_gn-all_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_gn_ws-all_2x_coco configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py ./tools/work_dir/mask_rcnn_r50_fpn_gn_ws-all_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_hrnetv2p_w18_1x_coco configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py ./tools/work_dir/mask_rcnn_hrnetv2p_w18_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_pafpn_1x_coco configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py ./tools/work_dir/faster_rcnn_r50_pafpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab retinanet_r50_nasfpn_crop640_50e_coco configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py ./tools/work_dir/retinanet_r50_nasfpn_crop640_50e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_regnetx-3.2GF_fpn_1x_coco configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py ./tools/work_dir/mask_rcnn_regnetx-3.2GF_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py ./tools/work_dir/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r2_101_fpn_2x_coco configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py ./tools/work_dir/faster_rcnn_r2_101_fpn_2x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab faster_rcnn_r50_fpn_groie_1x_coco configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py ./tools/work_dir/faster_rcnn_r50_fpn_groie_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab mask_rcnn_r50_fpn_1x_cityscapes configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py ./tools/work_dir/mask_rcnn_r50_fpn_1x_cityscapes --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab panoptic_fpn_r50_fpn_1x_coco configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py ./tools/work_dir/panoptic_fpn_r50_fpn_1x_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/yolox/yolox_tiny_8x8_300e_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab yolox_tiny_8x8_300e_coco configs/yolox/yolox_tiny_8x8_300e_coco.py ./tools/work_dir/yolox_tiny_8x8_300e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
    echo 'configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py' &
    GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab ssdlite_mobilenetv2_scratch_600e_coco configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py ./tools/work_dir/ssdlite_mobilenetv2_scratch_600e_coco --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &
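    These training jobs mirror the evaluation batch above: each line submits one background Slurm job on the hard-coded openmmlab partition, keeps only the latest checkpoint (checkpoint_config.max_keep_ckpts=1), and discards stdout. A minimal sketch of driving the batch (the script name is a placeholder):

    bash train_benchmark.sh   # submits every training job in the background
    wait                      # block until all submissions have returned
    ls ./tools/work_dir       # each job writes into its own work directory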

    Contributor Covenant Code of Conduct

    Our Pledge

    In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

    Our Standards

    Examples of behavior that contributes to creating a positive environment include:

    • Using welcoming and inclusive language
    • Being respectful of differing viewpoints and experiences
    • Gracefully accepting constructive criticism
    • Focusing on what is best for the community
    • Showing empathy towards other community members

    Examples of unacceptable behavior by participants include:

    • The use of sexualized language or imagery and unwelcome sexual attention or advances
    • Trolling, insulting/derogatory comments, and personal or political attacks
    • Public or private harassment
    • Publishing others' private information, such as a physical or electronic address, without explicit permission
    • Other conduct which could reasonably be considered inappropriate in a professional setting

    Our Responsibilities

    Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

    Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

    Scope

    This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

    Enforcement

    Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at chenkaidev@gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

    Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

    Attribution

    This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

    For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq


    We appreciate all contributions to improve MMDetection. Please refer to CONTRIBUTING.md in MMCV for more details about the contributing guidelines.

    name: "🐞 Bug report"
    description: "Create a report to help us reproduce and fix the bug"
    labels: "kind/bug,status/unconfirmed"
    title: "[Bug] "

    body:
      - type: markdown
        attributes:
          value: |
            If you have already identified the reason, we strongly appreciate you creating a new PR to fix it [here](https://github.com/open-mmlab/mmdetection/pulls)!
            If this issue is about installing MMCV, please file an issue at [MMCV](https://github.com/open-mmlab/mmcv/issues/new/choose).
            If you need our help, please fill in as much of the following form as you're able to.
            **The less clear the description, the longer it will take to solve it.**

      - type: checkboxes
        attributes:
          label: Prerequisite
          description: Please check the following items before creating a new issue.
          options:
            - label: I have searched [Issues](https://github.com/open-mmlab/mmdetection/issues) and [Discussions](https://github.com/open-mmlab/mmdetection/discussions) but cannot get the expected help.
              required: true
            - label: I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.
              required: true
            - label: The bug has not been fixed in the [latest version (master)](https://github.com/open-mmlab/mmdetection) or [latest version (3.x)](https://github.com/open-mmlab/mmdetection/tree/dev-3.x).
              required: true

      - type: dropdown
        id: task
        attributes:
          label: Task
          description: The problem arises when
          options:
            - I'm using the official example scripts/configs for the officially supported tasks/models/datasets.
            - I have modified the scripts/configs, or I'm working on my own tasks/models/datasets.
        validations:
          required: true

      - type: dropdown
        id: branch
        attributes:
          label: Branch
          description: The problem arises when I'm working on
          options:
            - master branch https://github.com/open-mmlab/mmdetection
            - 3.x branch https://github.com/open-mmlab/mmdetection/tree/3.x
        validations:
          required: true

      - type: textarea
        attributes:
          label: Environment
          description: |
            Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and copy-paste it here.
            You may add additional information that may be helpful for locating the problem, such as
            - How you installed PyTorch \[e.g., pip, conda, source\]
            - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
        validations:
          required: true

      - type: textarea
        attributes:
          label: Reproduces the problem - code sample
          description: |
            Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
          placeholder: |
            ```python
            # Sample code to reproduce the problem
            ```
        validations:
          required: true

      - type: textarea
        attributes:
          label: Reproduces the problem - command or script
          description: |
            What command or script did you run?
          placeholder: |
            ```shell
            The command or script you run.
            ```
        validations:
          required: true

      - type: textarea
        attributes:
          label: Reproduces the problem - error message
          description: |
            Please provide the error message or logs you got, with the full traceback.
          placeholder: |
            ```
            The error message or logs you got, with the full traceback.
            ```
        validations:
          required: true

      - type: textarea
        attributes:
          label: Additional information
          description: Tell us anything else you think we should know.
          placeholder: |
            1. What's your expected result?
            2. What dataset did you use?
            3. What do you think might be the reason?
    name: 🚀 Feature request
    description: Suggest an idea for this project
    labels: "kind/enhancement,status/unconfirmed"
    title: "[Feature] "

    body:
      - type: markdown
        attributes:
          value: |
            We strongly appreciate you creating a PR to implement this feature [here](https://github.com/open-mmlab/mmdetection/pulls)!
            If you need our help, please fill in as much of the following form as you're able to.
            **The less clear the description, the longer it will take to solve it.**

      - type: textarea
        attributes:
          label: What's the feature?
          description: |
            Tell us more about the feature and how this feature can help.
          placeholder: |
            E.g., It is inconvenient when \[....\].
            This feature can \[....\].
        validations:
          required: true

      - type: textarea
        attributes:
          label: Any other context?
          description: |
            Have you considered any alternative solutions or features? If so, what are they?
            Also, feel free to add any other context or screenshots about the feature request here.
    name: "\U0001F31F New model/dataset/scheduler addition"
    description: Submit a proposal/request to implement a new model / dataset / scheduler
    labels: "kind/feature,status/unconfirmed"
    title: "[New Models] "

    body:
      - type: textarea
        id: description-request
        validations:
          required: true
        attributes:
          label: Model/Dataset/Scheduler description
          description: |
            Put any and all important information relative to the model/dataset/scheduler

      - type: checkboxes
        attributes:
          label: Open source status
          description: |
            Please provide the open-source status, which would be very helpful
          options:
            - label: "The model implementation is available"
            - label: "The model weights are available."

      - type: textarea
        id: additional-info
        attributes:
          label: Provide useful links for the implementation
          description: |
            Please provide information regarding the implementation, the weights, and the authors.
            Please mention the authors by @gh-username if you're aware of their usernames.
name: 📚 Documentation
description: Report an issue related to the documentation.
labels: "kind/doc,status/unconfirmed"
title: "[Docs] "
body:
  - type: dropdown
    id: branch
    attributes:
      label: Branch
      description: This issue is related to the
      options:
        - master branch https://mmdetection.readthedocs.io/en/latest/
        - 3.x branch https://mmdetection.readthedocs.io/en/3.x/
    validations:
      required: true
  - type: textarea
    attributes:
      label: 📚 The doc issue
      description: >
        A clear and concise description of the issue.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Suggest a potential alternative/fix
      description: >
        Tell us how we could improve the documentation in this regard.
  - type: markdown
    attributes:
      value: >
        Thanks for contributing 🎉!
    1. name: "💥 Reimplementation Questions"
    2. description: "Ask about questions during model reimplementation"
    3. labels: "kind/enhancement,status/unconfirmed"
    4. title: "[Reimplementation] "
    5. body:
    6. - type: markdown
    7. attributes:
    8. value: |
    9. We strongly appreciate you creating a PR to implement this feature [here](https://github.com/open-mmlab/mmdetection/pulls)!
    10. If you need our help, please fill in as much of the following form as you're able to.
    11. **The less clear the description, the longer it will take to solve it.**
    12. - type: checkboxes
    13. attributes:
    14. label: Prerequisite
    15. description: Please check the following items before creating a new issue.
    16. options:
    17. - label: I have searched [Issues](https://github.com/open-mmlab/mmdetection/issues) and [Discussions](https://github.com/open-mmlab/mmdetection/discussions) but cannot get the expected help.
    18. required: true
    19. - label: I have read the [FAQ documentation](https://mmdetection.readthedocs.io/en/latest/faq.html) but cannot get the expected help.
    20. required: true
    21. - label: The bug has not been fixed in the [latest version (master)](https://github.com/open-mmlab/mmdetection) or [latest version (3.x)](https://github.com/open-mmlab/mmdetection/tree/dev-3.x).
    22. required: true
    23. - type: textarea
    24. attributes:
    25. label: 💬 Describe the reimplementation questions
    26. description: |
    27. A clear and concise description of what the problem you meet and what have you done.
    28. There are several common situations in the reimplementation issues as below
    29. 1. Reimplement a model in the model zoo using the provided configs
    30. 2. Reimplement a model in the model zoo on other dataset (e.g., custom datasets)
    31. 3. Reimplement a custom model but all the components are implemented in MMDetection
    32. 4. Reimplement a custom model with new modules implemented by yourself
    33. There are several things to do for different cases as below.
    34. - For case 1 & 3, please follow the steps in the following sections thus we could help to quick identify the issue.
    35. - For case 2 & 4, please understand that we are not able to do much help here because we usually do not know the full code and the users should be responsible to the code they write.
    36. - One suggestion for case 2 & 4 is that the users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtain in the issue, and follow the steps in the following sections and try as clear as possible so that we can better help you.
    37. placeholder: |
    38. A clear and concise description of what the bug is.
    39. What config dir you run?
    40. ```none
    41. A placeholder for the config.
    42. ```
    43. ```shell
    44. The command or script you run.
    45. ```
    46. ```
    47. The error message or logs you got, with the full traceback.
    48. ```
    49. validations:
    50. required: true
    51. - type: textarea
    52. attributes:
    53. label: Environment
    54. description: |
    55. Please run `python mmdet/utils/collect_env.py` to collect necessary environment information and paste it here.
    56. You may add addition that may be helpful for locating the problem, such as
    57. - How you installed PyTorch \[e.g., pip, conda, source\]
    58. - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
    59. validations:
    60. required: true
    61. - type: textarea
    62. attributes:
    63. label: Expected results
    64. description: If applicable, paste the related results here, e.g., what you expect and what you get.
    65. placeholder: |
    66. ```none
    67. A placeholder for results comparison
    68. ```
    69. - type: textarea
    70. attributes:
    71. label: Additional information
    72. description: Tell us anything else you think we should know.
    73. placeholder: |
    74. 1. Did you make any modifications on the code or config? Did you understand what you have modified?
    75. 2. What dataset did you use?
    76. 3. What do you think might be the reason?
blank_issues_enabled: true
contact_links:
  - name: 💬 Forum
    url: https://github.com/open-mmlab/mmdetection/discussions
    about: Ask general usage questions and discuss with other MMDetection community members
  - name: 🌐 Explore OpenMMLab
    url: https://openmmlab.com/
    about: Get to know more about OpenMMLab

Thanks for your contribution; we appreciate it a lot. The following instructions will make your pull request healthier and help it get feedback more quickly. If you do not understand some items, don't worry: just open the pull request and ask the maintainers for help.

    Motivation

    Please describe the motivation of this PR and the goal you want to achieve through this PR.

    Modification

    Please briefly describe what modification is made in this PR.

    BC-breaking (Optional)

    Does the modification introduce changes that break the backward-compatibility of the downstream repos? If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR.

    Use cases (Optional)

    If this PR introduces a new feature, it is better to list some use cases here, and update the documentation.

    Checklist

    1. Pre-commit or other linting tools are used to fix the potential lint issues.
2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure correctness.
    3. If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMCls.
    4. The documentation has been modified accordingly, like docstring or example tutorials.
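
Checklist items 1 and 2 can be verified locally before the PR is opened. A minimal sketch, assuming the pre-commit config and test requirements shipped with this copy of mmdetection (these are the same commands the lint and build workflows below run):

```shell
# Run the same lint hooks as CI (see the lint workflow below).
pip install pre-commit
pre-commit run --all-files

# Run the unit tests with coverage, as the build workflow does.
pip install -r requirements/tests.txt
coverage run --branch --source mmdet -m pytest tests/
coverage report -m
```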
name: build

on:
  push:
    paths-ignore:
      - ".dev_scripts/**"
      - ".github/**.md"
      - "demo/**"
      - "docker/**"
      - "tools/**"
      - "README.md"
      - "README_zh-CN.md"
  pull_request:
    paths-ignore:
      - ".dev_scripts/**"
      - ".github/**.md"
      - "demo/**"
      - "docker/**"
      - "docs/**"
      - "docs_zh-CN/**"
      - "tools/**"
      - "README.md"
      - "README_zh-CN.md"

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  build_cpu:
    runs-on: ubuntu-18.04
    strategy:
      matrix:
        python-version: [3.7]
        torch: [1.5.1, 1.6.0, 1.7.0, 1.8.0, 1.9.0, 1.10.1]
        include:
          - torch: 1.5.1
            torchvision: 0.6.1
            mmcv: 1.5
          - torch: 1.6.0
            torchvision: 0.7.0
            mmcv: 1.6
          - torch: 1.7.0
            torchvision: 0.8.1
            mmcv: 1.7
          - torch: 1.8.0
            torchvision: 0.9.0
            mmcv: 1.8
          - torch: 1.9.0
            torchvision: 0.10.0
            mmcv: 1.9
          - torch: 1.10.1
            torchvision: 0.11.2
            mmcv: "1.10"
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install PyTorch
        run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
      - name: Install MMCV
        run: |
          pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch${{matrix.mmcv}}/index.html
          python -c 'import mmcv; print(mmcv.__version__)'
      - name: Install unittest dependencies
        run: |
          pip install -r requirements/tests.txt -r requirements/optional.txt
          pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
          pip install git+https://github.com/cocodataset/panopticapi.git
      - name: Build and install
        run: rm -rf .eggs && pip install -e .
      - name: Run unittests and generate coverage report
        run: |
          coverage run --branch --source mmdet -m pytest tests/
          coverage xml
          coverage report -m

  build_cuda101:
    runs-on: ubuntu-18.04
    container:
      image: pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel
    strategy:
      matrix:
        python-version: [3.7]
        torch: [1.5.1+cu101, 1.6.0+cu101, 1.7.0+cu101, 1.8.0+cu101]
        include:
          - torch: 1.5.1+cu101
            torch_version: torch1.5.1
            torchvision: 0.6.1+cu101
            mmcv: 1.5
          - torch: 1.6.0+cu101
            torch_version: torch1.6.0
            torchvision: 0.7.0+cu101
            mmcv: 1.6
          - torch: 1.7.0+cu101
            torch_version: torch1.7.0
            torchvision: 0.8.1+cu101
            mmcv: 1.7
          - torch: 1.8.0+cu101
            torch_version: torch1.8.0
            torchvision: 0.9.0+cu101
            mmcv: 1.8
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Fetch GPG keys
        run: |
          apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
          apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
      - name: Install system dependencies
        run: |
          apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev
          apt-get clean
          rm -rf /var/lib/apt/lists/*
      - name: Install PyTorch
        run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
      - name: Install dependencies for compiling onnx when python=3.9
        run: python -m pip install "protobuf <= 3.20.1" && apt-get install libprotobuf-dev protobuf-compiler
        if: ${{matrix.python-version == '3.9'}}
      - name: Install mmdet dependencies
        run: |
          python -V
          export CFLAGS=`python -c 'import sysconfig;print("-I"+sysconfig.get_paths()["include"])'`
          export CXXFLAGS="${CFLAGS}"
          python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch${{matrix.mmcv}}/index.html
          python -m pip install pycocotools
          python -m pip install -r requirements/tests.txt -r requirements/optional.txt
          python -m pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
          python -m pip install git+https://github.com/cocodataset/panopticapi.git
          python -c 'import mmcv; print(mmcv.__version__)'
      - name: Build and install
        run: |
          rm -rf .eggs
          python setup.py check -m -s
          TORCH_CUDA_ARCH_LIST=7.0 pip install .
      - name: Run unittests and generate coverage report
        run: |
          coverage run --branch --source mmdet -m pytest tests/
          coverage xml
          coverage report -m
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1.0.10
        with:
          file: ./coverage.xml
          flags: unittests
          env_vars: OS,PYTHON
          name: codecov-umbrella
          fail_ci_if_error: false

  build_cuda102:
    runs-on: ubuntu-18.04
    container:
      image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel
    strategy:
      matrix:
        python-version: [3.7, 3.8, 3.9]
        torch: [1.9.0+cu102, 1.10.1+cu102]
        include:
          - torch: 1.9.0+cu102
            torch_version: torch1.9.0
            torchvision: 0.10.0+cu102
            mmcv: 1.9
          - torch: 1.10.1+cu102
            torch_version: torch1.10.1
            torchvision: 0.11.2+cu102
            mmcv: "1.10"
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Fetch GPG keys
        run: |
          apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
          apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
      # Add ppa source repo for python3.9.
      - name: Add python3.9 source
        run: |
          apt-get update && apt-get install -y software-properties-common
          add-apt-repository -y ppa:deadsnakes/ppa
        if: ${{matrix.python-version == '3.9'}}
      # Install python-dev for some packages which require libpython3.Xm.
      # Github's setup-python cannot install python3.9-dev, so we have to use apt install.
      # Set DEBIAN_FRONTEND=noninteractive to avoid some interactions.
      - name: Install python-dev
        run: apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends python${{matrix.python-version}}-dev
      - name: Install system dependencies
        run: |
          apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6
          apt-get clean
          rm -rf /var/lib/apt/lists/*
      - name: Install PyTorch
        run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
      - name: Install dependencies for compiling onnx when python=3.9
        run: python -m pip install "protobuf <= 3.20.1" && apt-get update && apt-get -y install libprotobuf-dev protobuf-compiler cmake
        if: ${{matrix.python-version == '3.9'}}
      - name: Install mmdet dependencies
        run: |
          python -V
          export CFLAGS=`python -c 'import sysconfig;print("-I"+sysconfig.get_paths()["include"])'`
          export CXXFLAGS="${CFLAGS}"
          python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu102/torch${{matrix.mmcv}}/index.html
          python -m pip install pycocotools
          python -m pip install -r requirements/tests.txt -r requirements/optional.txt
          python -m pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
          python -m pip install git+https://github.com/cocodataset/panopticapi.git
          python -c 'import mmcv; print(mmcv.__version__)'
      - name: Build and install
        run: |
          rm -rf .eggs
          python setup.py check -m -s
          TORCH_CUDA_ARCH_LIST=7.0 pip install .
      - name: Run unittests and generate coverage report
        run: |
          coverage run --branch --source mmdet -m pytest tests/
          coverage xml
          coverage report -m
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v2
        with:
          files: ./coverage.xml
          flags: unittests
          env_vars: OS,PYTHON
          name: codecov-umbrella
          fail_ci_if_error: false

  build_windows:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [windows-2022]
        python: [3.8]
        platform: [cpu, cu111]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}
      - name: Upgrade pip
        run: python -m pip install pip --upgrade --user
      - name: Install PyTorch
        # As a complement to Linux CI, we test on PyTorch LTS version
        run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
      - name: Install MMCV
        run: pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8/index.html --only-binary mmcv-full
      - name: Install unittest dependencies
        run: |
          python -V
          python -m pip install pycocotools
          python -m pip install -r requirements/tests.txt -r requirements/optional.txt
          python -m pip install albumentations>=0.3.2 --no-binary imgaug,albumentations
          python -m pip install git+https://github.com/cocodataset/panopticapi.git
          python -c 'import mmcv; print(mmcv.__version__)'
      - name: Show pip list
        run: pip list
      - name: Build and install
        run: pip install -e .
      - name: Run unittests
        run: coverage run --branch --source mmdet -m pytest tests
      - name: Generate coverage report
        run: |
          coverage xml
          coverage report -m
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v2
        with:
          file: ./coverage.xml
          flags: unittests
          env_vars: OS,PYTHON
          name: codecov-umbrella
          fail_ci_if_error: false
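
To debug a failing cell of the build_cpu matrix, the same commands can be replayed outside CI. A minimal sketch for the torch 1.10.1 / mmcv "1.10" cell; versions for other cells follow the include list above:

```shell
# Reproduce one build_cpu matrix cell locally (torch 1.10.1, torchvision 0.11.2).
pip install torch==1.10.1+cpu torchvision==0.11.2+cpu -f https://download.pytorch.org/whl/torch_stable.html
pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.10/index.html
pip install -r requirements/tests.txt -r requirements/optional.txt
rm -rf .eggs && pip install -e .
coverage run --branch --source mmdet -m pytest tests/
```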
name: build_pat

on: push

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  build_parrots:
    runs-on: ubuntu-latest
    container:
      image: ghcr.io/zhouzaida/parrots-mmcv:1.3.4
      credentials:
        username: zhouzaida
        password: ${{ secrets.CR_PAT }}
    steps:
      - uses: actions/checkout@v2
      - name: Install mmdet dependencies
        run: |
          git clone https://github.com/open-mmlab/mmcv.git && cd mmcv
          MMCV_WITH_OPS=1 python setup.py install
          cd .. && rm -rf mmcv
          python -c 'import mmcv; print(mmcv.__version__)'
          pip install -r requirements.txt
      - name: Build and install
        run: rm -rf .eggs && pip install -e .
name: deploy

on: push

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  build-n-publish:
    runs-on: ubuntu-latest
    if: startsWith(github.event.ref, 'refs/tags')
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.7
        uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - name: Install torch
        run: pip install torch
      - name: Install wheel
        run: pip install wheel
      - name: Build MMDetection
        run: python setup.py sdist bdist_wheel
      - name: Publish distribution to PyPI
        run: |
          pip install twine
          twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
name: lint

on: [push, pull_request]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python 3.7
        uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - name: Install pre-commit hook
        run: |
          pip install pre-commit
          pre-commit install
      - name: Linting
        run: pre-commit run --all-files
      - name: Check docstring coverage
        run: |
          pip install interrogate
          interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 80 mmdet
name: 'Close stale issues and PRs'

on:
  schedule:
    # check issues and pull requests once every day
    - cron: '25 11 * * *'

permissions:
  contents: read

jobs:
  invalid-stale-close:
    permissions:
      issues: write
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v4
        with:
          stale-issue-message: 'This issue is marked as stale because it has been marked as invalid or awaiting response for 7 days without any further response. It will be closed in 5 days if the stale label is not removed or if there is no further response.'
          stale-pr-message: 'This PR is marked as stale because there has been no activity in the past 45 days. It will be closed in 10 days if the stale label is not removed or if there are no further updates.'
          close-issue-message: 'This issue is closed because it has been stale for 5 days. Please open a new issue if you have similar issues or any new updates now.'
          close-pr-message: 'This PR is closed because it has been stale for 10 days. Please reopen this PR if you have any updates and want to keep contributing the code.'
          # only issues/PRs with any of the invalid and awaiting-response labels are checked
          any-of-labels: 'invalid, awaiting response'
          days-before-issue-stale: 7
          days-before-pr-stale: 45
          days-before-issue-close: 5
          days-before-pr-close: 10
          # automatically remove the stale label when the issues or pull requests are updated or commented on
          remove-stale-when-updated: true
name: test-mim

on:
  push:
    paths:
      - 'model-index.yml'
      - 'configs/**'
  pull_request:
    paths:
      - 'model-index.yml'
      - 'configs/**'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  build_cpu:
    runs-on: ubuntu-18.04
    strategy:
      matrix:
        python-version: [3.7]
        torch: [1.8.0]
        include:
          - torch: 1.8.0
            torch_version: torch1.8
            torchvision: 0.9.0
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Upgrade pip
        run: pip install pip --upgrade
      - name: Install Pillow
        run: pip install Pillow==6.2.2
        if: ${{matrix.torchvision == '0.4.2'}}
      - name: Install PyTorch
        run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
      - name: Install openmim
        run: pip install openmim
      - name: Build and install
        run: rm -rf .eggs && mim install -e .
      - name: test commands of mim
        run: mim search mmdet
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/en/_build/
docs/zh_cn/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

data/
data
.vscode
.idea
.DS_Store

# custom
*.pkl
*.pkl.json
*.log.json
docs/modelzoo_statistics.md
mmdet/.mim
work_dirs/

# Pytorch
*.pth
*.py~
*.sh~
assign:
  strategy:
    # random
    daily-shift-based
  schedule: "*/1 * * * *"
  assignees:
    - Czm369
    - hhaAndroid
    - zytx121
    - RangiLyu
    - BIGWangYuDong
    - chhluo
    - ZwwWayne
repos:
  - repo: https://github.com/PyCQA/flake8
    rev: 5.0.4
    hooks:
      - id: flake8
  - repo: https://github.com/PyCQA/isort
    rev: 5.11.5
    hooks:
      - id: isort
  - repo: https://github.com/pre-commit/mirrors-yapf
    rev: v0.32.0
    hooks:
      - id: yapf
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.3.0
    hooks:
      - id: trailing-whitespace
      - id: check-yaml
      - id: end-of-file-fixer
      - id: requirements-txt-fixer
      - id: double-quote-string-fixer
      - id: check-merge-conflict
      - id: fix-encoding-pragma
        args: ["--remove"]
      - id: mixed-line-ending
        args: ["--fix=lf"]
  - repo: https://github.com/codespell-project/codespell
    rev: v2.2.1
    hooks:
      - id: codespell
  - repo: https://github.com/executablebooks/mdformat
    rev: 0.7.9
    hooks:
      - id: mdformat
        args: ["--number"]
        additional_dependencies:
          - mdformat-openmmlab
          - mdformat_frontmatter
          - linkify-it-py
  - repo: https://github.com/myint/docformatter
    rev: v1.3.1
    hooks:
      - id: docformatter
        args: ["--in-place", "--wrap-descriptions", "79"]
  - repo: https://github.com/open-mmlab/pre-commit-hooks
    rev: v0.2.0  # Use the ref you want to point at
    hooks:
      - id: check-algo-readme
      - id: check-copyright
        args: ["mmdet"]  # replace the dir_to_check with your expected directory to check
version: 2

formats: all

python:
  version: 3.7
  install:
    - requirements: requirements/docs.txt
    - requirements: requirements/readthedocs.txt
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
authors:
  - name: "MMDetection Contributors"
title: "OpenMMLab Detection Toolbox and Benchmark"
date-released: 2018-08-22
url: "https://github.com/open-mmlab/mmdetection"
license: Apache-2.0
Copyright 2018-2023 OpenMMLab. All rights reserved.

                              Apache License
                        Version 2.0, January 2004
                     http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

   "License" shall mean the terms and conditions for use, reproduction,
   and distribution as defined by Sections 1 through 9 of this document.

   "Licensor" shall mean the copyright owner or entity authorized by
   the copyright owner that is granting the License.

   "Legal Entity" shall mean the union of the acting entity and all
   other entities that control, are controlled by, or are under common
   control with that entity. For the purposes of this definition,
   "control" means (i) the power, direct or indirect, to cause the
   direction or management of such entity, whether by contract or
   otherwise, or (ii) ownership of fifty percent (50%) or more of the
   outstanding shares, or (iii) beneficial ownership of such entity.

   "You" (or "Your") shall mean an individual or Legal Entity
   exercising permissions granted by this License.

   "Source" form shall mean the preferred form for making modifications,
   including but not limited to software source code, documentation
   source, and configuration files.

   "Object" form shall mean any form resulting from mechanical
   transformation or translation of a Source form, including but
   not limited to compiled object code, generated documentation,
   and conversions to other media types.

   "Work" shall mean the work of authorship, whether in Source or
   Object form, made available under the License, as indicated by a
   copyright notice that is included in or attached to the work
   (an example is provided in the Appendix below).

   "Derivative Works" shall mean any work, whether in Source or Object
   form, that is based on (or derived from) the Work and for which the
   editorial revisions, annotations, elaborations, or other modifications
   represent, as a whole, an original work of authorship. For the purposes
   of this License, Derivative Works shall not include works that remain
   separable from, or merely link (or bind by name) to the interfaces of,
   the Work and Derivative Works thereof.

   "Contribution" shall mean any work of authorship, including
   the original version of the Work and any modifications or additions
   to that Work or Derivative Works thereof, that is intentionally
   submitted to Licensor for inclusion in the Work by the copyright owner
   or by an individual or Legal Entity authorized to submit on behalf of
   the copyright owner. For the purposes of this definition, "submitted"
   means any form of electronic, verbal, or written communication sent
   to the Licensor or its representatives, including but not limited to
   communication on electronic mailing lists, source code control systems,
   and issue tracking systems that are managed by, or on behalf of, the
   Licensor for the purpose of discussing and improving the Work, but
   excluding communication that is conspicuously marked or otherwise
   designated in writing by the copyright owner as "Not a Contribution."

   "Contributor" shall mean Licensor and any individual or Legal Entity
   on behalf of whom a Contribution has been received by Licensor and
   subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
   this License, each Contributor hereby grants to You a perpetual,
   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
   copyright license to reproduce, prepare Derivative Works of,
   publicly display, publicly perform, sublicense, and distribute the
   Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
   this License, each Contributor hereby grants to You a perpetual,
   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
   (except as stated in this section) patent license to make, have made,
   use, offer to sell, sell, import, and otherwise transfer the Work,
   where such license applies only to those patent claims licensable
   by such Contributor that are necessarily infringed by their
   Contribution(s) alone or by combination of their Contribution(s)
   with the Work to which such Contribution(s) was submitted. If You
   institute patent litigation against any entity (including a
   cross-claim or counterclaim in a lawsuit) alleging that the Work
   or a Contribution incorporated within the Work constitutes direct
   or contributory patent infringement, then any patent licenses
   granted to You under this License for that Work shall terminate
   as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
   Work or Derivative Works thereof in any medium, with or without
   modifications, and in Source or Object form, provided that You
   meet the following conditions:

   (a) You must give any other recipients of the Work or
       Derivative Works a copy of this License; and

   (b) You must cause any modified files to carry prominent notices
       stating that You changed the files; and

   (c) You must retain, in the Source form of any Derivative Works
       that You distribute, all copyright, patent, trademark, and
       attribution notices from the Source form of the Work,
       excluding those notices that do not pertain to any part of
       the Derivative Works; and

   (d) If the Work includes a "NOTICE" text file as part of its
       distribution, then any Derivative Works that You distribute must
       include a readable copy of the attribution notices contained
       within such NOTICE file, excluding those notices that do not
       pertain to any part of the Derivative Works, in at least one
       of the following places: within a NOTICE text file distributed
       as part of the Derivative Works; within the Source form or
       documentation, if provided along with the Derivative Works; or,
       within a display generated by the Derivative Works, if and
       wherever such third-party notices normally appear. The contents
       of the NOTICE file are for informational purposes only and
       do not modify the License. You may add Your own attribution
       notices within Derivative Works that You distribute, alongside
       or as an addendum to the NOTICE text from the Work, provided
       that such additional attribution notices cannot be construed
       as modifying the License.

   You may add Your own copyright statement to Your modifications and
   may provide additional or different license terms and conditions
   for use, reproduction, or distribution of Your modifications, or
   for any such Derivative Works as a whole, provided Your use,
   reproduction, and distribution of the Work otherwise complies with
   the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
   any Contribution intentionally submitted for inclusion in the Work
   by You to the Licensor shall be under the terms and conditions of
   this License, without any additional terms or conditions.
   Notwithstanding the above, nothing herein shall supersede or modify
   the terms of any separate license agreement you may have executed
   with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
   names, trademarks, service marks, or product names of the Licensor,
   except as required for reasonable and customary use in describing the
   origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
   agreed to in writing, Licensor provides the Work (and each
   Contributor provides its Contributions) on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied, including, without limitation, any warranties or conditions
   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
   PARTICULAR PURPOSE. You are solely responsible for determining the
   appropriateness of using or redistributing the Work and assume any
   risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
   whether in tort (including negligence), contract, or otherwise,
   unless required by applicable law (such as deliberate and grossly
   negligent acts) or agreed to in writing, shall any Contributor be
   liable to You for damages, including any direct, indirect, special,
   incidental, or consequential damages of any character arising as a
   result of this License or out of the use or inability to use the
   Work (including but not limited to damages for loss of goodwill,
   work stoppage, computer failure or malfunction, or any and all
   other commercial damages or losses), even if such Contributor
   has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
   the Work or Derivative Works thereof, You may choose to offer,
   and charge a fee for, acceptance of support, warranty, indemnity,
   or other liability obligations and/or rights consistent with this
   License. However, in accepting such obligations, You may act only
   on Your own behalf and on Your sole responsibility, not on behalf
   of any other Contributor, and only if You agree to indemnify,
   defend, and hold each Contributor harmless for any liability
   incurred by, or claims asserted against, such Contributor by reason
   of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

   To apply the Apache License to your work, attach the following
   boilerplate notice, with the fields enclosed by brackets "[]"
   replaced with your own identifying information. (Don't include
   the brackets!) The text should be enclosed in the appropriate
   comment syntax for the file format. We also recommend that a
   file or class name and description of purpose be included on the
   same "printed page" as the copyright notice for easier
   identification within third-party archives.

Copyright 2018-2023 OpenMMLab.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
include requirements/*.txt
include mmdet/VERSION
include mmdet/.mim/model-index.yml
include mmdet/.mim/demo/*/*
recursive-include mmdet/.mim/configs *.py *.yml
recursive-include mmdet/.mim/tools *.sh *.py

    English | 简体中文

    Introduction

    MMDetection is an open source object detection toolbox based on PyTorch. It is a part of the OpenMMLab project.

    The master branch works with PyTorch 1.5+.

    Major features
    • Modular Design

      We decompose the detection framework into different components and one can easily construct a customized object detection framework by combining different modules.

• Support of multiple frameworks out of the box

      The toolbox directly supports popular and contemporary detection frameworks, e.g. Faster RCNN, Mask RCNN, RetinaNet, etc.

    • High efficiency

      All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including Detectron2, maskrcnn-benchmark and SimpleDet.

    • State of the art

  The toolbox stems from the codebase developed by the MMDet team, who won the COCO Detection Challenge in 2018, and we keep pushing it forward.

Apart from MMDetection, we also released mmcv, a library for computer vision research on which this toolbox depends heavily.

    What's New

    💎 Stable version

2.28.1 was released on 1/2/2023:

    • Support Objects365 Dataset, and Separated and Occluded COCO metric
    • Support acceleration of RetinaNet and SSD on Ascend
    • Deprecate the support of Python 3.6 and fix some bugs of 2.28.0

    Please refer to changelog.md for details and release history.

    For compatibility changes between different versions of MMDetection, please refer to compatibility.md.

    🌟 Preview of 3.x version

    Highlight

    We are excited to announce our latest work on real-time object recognition tasks, RTMDet, a family of fully convolutional single-stage detectors. RTMDet not only achieves the best parameter-accuracy trade-off on object detection from tiny to extra-large model sizes but also obtains new state-of-the-art performance on instance segmentation and rotated object detection tasks. Details can be found in the technical report. Pre-trained models are here.


Task                       Dataset   AP                                          FPS (TRT FP16 BS1 3090)
Object Detection           COCO      52.8                                        322
Instance Segmentation      COCO      44.6                                        188
Rotated Object Detection   DOTA      78.9 (single-scale) / 81.3 (multi-scale)    121

A brand new version, MMDetection v3.0.0rc5, was released on 26/12/2022:

Find more new features in the 3.x branch. Issues and PRs are welcome!

    Installation

    Please refer to Installation for installation instructions.
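
For quick reference, here is a minimal sketch of the install flow that the CI workflows in this PR use: PyTorch first, then a mmcv-full wheel matching the torch/CUDA combination, then this checkout of MMDetection (exact versions depend on your environment):

```shell
pip install torch==1.10.1 torchvision==0.11.2
# pick the index matching your torch/CUDA combination
pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.10/index.html
pip install -e .  # run from the mmdetection source directory
```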

    Getting Started

    Please see get_started.md for the basic usage of MMDetection. We provide a Colab tutorial and an instance segmentation Colab tutorial, as well as other tutorials; a quick smoke test is sketched below.
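
As a quick smoke test after installation, the demo script shipped in demo/ runs a detector on a sample image. A minimal sketch, where the checkpoint path is illustrative and must point at a downloaded weight file:

```shell
python demo/image_demo.py demo/demo.jpg \
    configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
    checkpoints/faster_rcnn_r50_fpn_1x_coco.pth \
    --device cpu
```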

    Overview of Benchmark and Model Zoo

    Results and models are available in the model zoo.

    Architectures: grouped into Object Detection, Instance Segmentation, Panoptic Segmentation, and Other (Contrastive Learning, Distillation, Receptive Field Search).

    Components: grouped into Backbones, Necks, Loss, and Common.

    Some other methods are also supported in projects using MMDetection.

    FAQ

    Please refer to FAQ for frequently asked questions.

    Contributing

    We appreciate all contributions to improve MMDetection. Ongoing projects can be found in our GitHub Projects, and community users are welcome to participate in them. Please refer to CONTRIBUTING.md for the contributing guideline.

    Acknowledgement

    MMDetection is an open source project contributed to by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback. We hope the toolbox and benchmark can serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop new detectors.

    Citation

    If you use this toolbox or benchmark in your research, please cite this project.

    @article{mmdetection,
      title   = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
      author  = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
                 Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
                 Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
                 Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
                 Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
                 and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
      journal= {arXiv preprint arXiv:1906.07155},
      year={2019}
    }
    

    License

    This project is released under the Apache 2.0 license.

    Projects in OpenMMLab

    • MMEngine: OpenMMLab foundational library for training deep learning models.
    • MMCV: OpenMMLab foundational library for computer vision.
    • MMEval: A unified evaluation library for multiple machine learning libraries.
    • MIM: MIM installs OpenMMLab packages.
    • MMClassification: OpenMMLab image classification toolbox and benchmark.
    • MMDetection: OpenMMLab detection toolbox and benchmark.
    • MMDetection3D: OpenMMLab's next-generation platform for general 3D object detection.
    • MMRotate: OpenMMLab rotated object detection toolbox and benchmark.
    • MMSegmentation: OpenMMLab semantic segmentation toolbox and benchmark.
    • MMOCR: OpenMMLab text detection, recognition, and understanding toolbox.
    • MMPose: OpenMMLab pose estimation toolbox and benchmark.
    • MMHuman3D: OpenMMLab 3D human parametric model toolbox and benchmark.
    • MMSelfSup: OpenMMLab self-supervised learning toolbox and benchmark.
    • MMRazor: OpenMMLab model compression toolbox and benchmark.
    • MMFewShot: OpenMMLab fewshot learning toolbox and benchmark.
    • MMAction2: OpenMMLab's next-generation action understanding toolbox and benchmark.
    • MMTracking: OpenMMLab video perception toolbox and benchmark.
    • MMFlow: OpenMMLab optical flow toolbox and benchmark.
    • MMEditing: OpenMMLab image and video editing toolbox.
    • MMGeneration: OpenMMLab image and video generative models toolbox.
    • MMDeploy: OpenMMLab model deployment framework.

    English | 简体中文

    简介

    MMDetection 是一个基于 PyTorch 的目标检测开源工具箱。它是 OpenMMLab 项目的一部分。

    主分支代码目前支持 PyTorch 1.5 以上的版本。

    主要特性
    • 模块化设计

      MMDetection 将检测框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的检测模型

    • 丰富的即插即用的算法和模型

      MMDetection 支持了众多主流的和最新的检测算法,例如 Faster R-CNN,Mask R-CNN,RetinaNet 等。

    • 速度快

      基本的框和 mask 操作都实现了 GPU 版本,训练速度比其他代码库更快或者相当,包括 Detectron2, maskrcnn-benchmarkSimpleDet

    • 性能高

      MMDetection 这个算法库源自于 COCO 2018 目标检测竞赛的冠军团队 MMDet 团队开发的代码,我们在之后持续进行了改进和提升。

    除了 MMDetection 之外,我们还开源了计算机视觉基础库 MMCV,MMCV 是 MMDetection 的主要依赖。

    最新进展

    💎 稳定版本

    最新的 2.28.1 版本已经在 2023.2.1 发布:

    • 支持 Object365 数据集和遮挡物检测的 benchmark
    • 支持 SSD 和 RetinaNet 算法在昇腾芯片上的加速
    • 不再保证对 Python 3.6 的支持并修复了 2.28.0 的一些 bug

    如果想了解更多版本更新细节和历史信息,请阅读更新日志

    如果想了解 MMDetection 不同版本之间的兼容性, 请参考兼容性说明文档

    🌟 3.x 预览版本

    亮点

    我们很高兴向大家介绍我们在实时目标识别任务方面的最新成果 RTMDet,包含了一系列的全卷积单阶段检测模型。 RTMDet 不仅在从 tiny 到 extra-large 尺寸的目标检测模型上实现了最佳的参数量和精度的平衡,而且在实时实例分割和旋转目标检测任务上取得了最先进的成果。 更多细节请参阅技术报告。 预训练模型可以在这里找到。

    PWC PWC PWC

    Task Dataset AP FPS(TRT FP16 BS1 3090)
    Object Detection COCO 52.8 322
    Instance Segmentation COCO 44.6 188
    Rotated Object Detection DOTA 78.9(single-scale)/81.3(multi-scale) 121

    全新的 v3.0.0rc5 版本已经在 2022.12.26 发布:

    安装

    请参考安装指令进行安装。

    教程

    请参考快速入门文档学习 MMDetection 的基本使用。 我们提供了 检测的 colab 教程实例分割的 colab 教程,也为新手提供了完整的运行教程,其他教程如下

    同时,我们还提供了 MMDetection 中文解读文案汇总

    基准测试和模型库

    测试结果和模型可以在模型库中找到。

    算法架构
    Object Detection Instance Segmentation Panoptic Segmentation Other
  5. Contrastive Learning
  6. Distillation
  7. Receptive Field Search
  8. 模块组件
    Backbones Necks Loss Common

    我们在基于 MMDetection 的项目中列举了一些其他的支持的算法。

    常见问题

    请参考 FAQ 了解其他用户的常见问题。

    贡献指南

    我们感谢所有的贡献者为改进和提升 MMDetection 所作出的努力。我们将正在进行中的项目添加进了GitHub Projects页面,非常欢迎社区用户能参与进这些项目中来。请参考贡献指南来了解参与项目贡献的相关指引。

    致谢

    MMDetection 是一款由来自不同高校和企业的研发人员共同参与贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 我们希望这个工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现已有算法并开发自己的新模型,从而不断为开源社区提供贡献。

    引用

    如果你在研究中使用了本项目的代码或者性能基准,请参考如下 bibtex 引用 MMDetection。

    @article{mmdetection,
      title   = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
      author  = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
                 Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
                 Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
                 Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
                 Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
                 and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
      journal= {arXiv preprint arXiv:1906.07155},
      year={2019}
    }
    

    开源许可证

    该项目采用 Apache 2.0 开源许可证

Other OpenMMLab projects

• MMEngine: OpenMMLab foundational library for training deep learning models.
• MMCV: OpenMMLab foundational library for computer vision.
• MMEval: A unified and open cross-framework algorithm evaluation library.
• MIM: MIM is the unified entry point for OpenMMLab projects, algorithms, and models.
• MMClassification: OpenMMLab image classification toolbox.
• MMDetection: OpenMMLab object detection toolbox.
• MMDetection3D: OpenMMLab's next-generation platform for general 3D object detection.
• MMRotate: OpenMMLab rotated object detection toolbox and benchmark.
• MMSegmentation: OpenMMLab semantic segmentation toolbox.
• MMOCR: OpenMMLab full-pipeline text detection, recognition, and understanding toolbox.
• MMPose: OpenMMLab pose estimation toolbox.
• MMHuman3D: OpenMMLab 3D human parametric model toolbox and benchmark.
• MMSelfSup: OpenMMLab self-supervised learning toolbox and benchmark.
• MMRazor: OpenMMLab model compression toolbox and benchmark.
• MMFewShot: OpenMMLab few-shot learning toolbox and benchmark.
• MMAction2: OpenMMLab's next-generation video understanding toolbox.
• MMTracking: OpenMMLab unified video perception platform.
• MMFlow: OpenMMLab optical flow toolbox and benchmark.
• MMEditing: OpenMMLab image and video editing toolbox.
• MMGeneration: OpenMMLab image and video generative models toolbox.
• MMDeploy: OpenMMLab model deployment framework.

Welcome to the OpenMMLab community

Scan the QR codes below to follow the OpenMMLab team's official Zhihu account and to join the official OpenMMLab QQ group.

In the OpenMMLab community, we will:

• 📢 share cutting-edge core technologies of AI frameworks
• 💻 walk through the source code of commonly used PyTorch modules
• 📰 publish news about OpenMMLab
• 🚀 introduce state-of-the-art algorithms developed by OpenMMLab
• 🏃 provide faster, more efficient channels for Q&A and feedback
• 🔥 offer a platform for in-depth exchange with developers from all industries

Packed with practical content 📘 and waiting for you 💗, the OpenMMLab community looks forward to having you 👬

# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root +
            'annotations/instancesonly_filtered_gtFine_train.json',
            img_prefix=data_root + 'leftImg8bit/train/',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_val.json',
        img_prefix=data_root + 'leftImg8bit/val/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_test.json',
        img_prefix=data_root + 'leftImg8bit/test/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root +
            'annotations/instancesonly_filtered_gtFine_train.json',
            img_prefix=data_root + 'leftImg8bit/train/',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_val.json',
        img_prefix=data_root + 'leftImg8bit/val/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_test.json',
        img_prefix=data_root + 'leftImg8bit/test/',
        pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
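
Configs like the one above are consumed by MMDetection's dataset builder. A short sketch of loading it and inspecting one pipeline output, assuming the standard configs/_base_/datasets/ layout and COCO 2017 unpacked under data/coco/:

from mmcv import Config
from mmdet.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/coco_detection.py')
dataset = build_dataset(cfg.data.train)
sample = dataset[0]  # train_pipeline output: 'img', 'gt_bboxes', 'gt_labels', ...
print(sample.keys())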
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='SegRescale', scale_factor=1 / 8),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip', flip_ratio=0.5),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        seg_prefix=data_root + 'stuffthingmaps/train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadPanopticAnnotations',
        with_bbox=True,
        with_mask=True,
        with_seg=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='SegRescale', scale_factor=1 / 4),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/panoptic_train2017.json',
        img_prefix=data_root + 'train2017/',
        seg_prefix=data_root + 'annotations/panoptic_train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/panoptic_val2017.json',
        img_prefix=data_root + 'val2017/',
        seg_prefix=data_root + 'annotations/panoptic_val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/panoptic_val2017.json',
        img_prefix=data_root + 'val2017/',
        seg_prefix=data_root + 'annotations/panoptic_val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['PQ'])
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(750, 1101), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(750, 1101),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=1,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
        img_prefix=data_root + 'Img/',
        pipeline=train_pipeline,
        data_root=data_root),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
        img_prefix=data_root + 'Img/',
        pipeline=test_pipeline,
        data_root=data_root),
    test=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/DeepFashion_segmentation_gallery.json',
        img_prefix=data_root + 'Img/',
        pipeline=test_pipeline,
        data_root=data_root))
evaluation = dict(interval=5, metric=['bbox', 'segm'])
# dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        _delete_=True,
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v0.5_train.json',
            img_prefix=data_root + 'train2017/')),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v0.5_val.json',
        img_prefix=data_root + 'val2017/'),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v0.5_val.json',
        img_prefix=data_root + 'val2017/'))
evaluation = dict(metric=['bbox', 'segm'])
# dataset settings
_base_ = 'coco_instance.py'
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        _delete_=True,
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v1_train.json',
            img_prefix=data_root)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root))
evaluation = dict(metric=['bbox', 'segm'])
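
Both LVIS files here are thin overlays on coco_instance.py: `_base_` pulls in the COCO instance settings, and `_delete_=True` rebuilds the train entry from scratch instead of merging into the inherited one. A sketch of inspecting the merged result, assuming the standard configs/_base_/datasets/ layout:

from mmcv import Config

cfg = Config.fromfile('configs/_base_/datasets/lvis_v1_instance.py')
print(cfg.data.train.type)  # 'ClassBalancedDataset': rebuilt due to _delete_=True
print(cfg.data.val.type)    # 'LVISV1Dataset': merged over the inherited val dict
print(len(cfg.data.val.pipeline))  # pipeline itself inherited from coco_instance.py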
# dataset settings
dataset_type = 'Objects365V1Dataset'
data_root = 'data/Objects365/Obj365_v1/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/objects365_train.json',
        img_prefix=data_root + 'train/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/objects365_val.json',
        img_prefix=data_root + 'val/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/objects365_val.json',
        img_prefix=data_root + 'val/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# dataset settings
dataset_type = 'Objects365V2Dataset'
data_root = 'data/Objects365/Obj365_v2/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/zhiyuan_objv2_train.json',
        img_prefix=data_root + 'train/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/zhiyuan_objv2_val.json',
        img_prefix=data_root + 'val/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/zhiyuan_objv2_val.json',
        img_prefix=data_root + 'val/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, denorm_bbox=True),
    dict(type='Resize', img_scale=(1024, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1024, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ],
    ),
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=0,  # workers_per_gpu > 0 may cause out-of-memory errors
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/oidv6-train-annotations-bbox.csv',
        img_prefix=data_root + 'OpenImages/train/',
        label_file=data_root + 'annotations/class-descriptions-boxable.csv',
        hierarchy_file=data_root +
        'annotations/bbox_labels_600_hierarchy.json',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/validation-annotations-bbox.csv',
        img_prefix=data_root + 'OpenImages/validation/',
        label_file=data_root + 'annotations/class-descriptions-boxable.csv',
        hierarchy_file=data_root +
        'annotations/bbox_labels_600_hierarchy.json',
        meta_file=data_root + 'annotations/validation-image-metas.pkl',
        image_level_ann_file=data_root +
        'annotations/validation-annotations-human-imagelabels-boxable.csv',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/validation-annotations-bbox.csv',
        img_prefix=data_root + 'OpenImages/validation/',
        label_file=data_root + 'annotations/class-descriptions-boxable.csv',
        hierarchy_file=data_root +
        'annotations/bbox_labels_600_hierarchy.json',
        meta_file=data_root + 'annotations/validation-image-metas.pkl',
        image_level_ann_file=data_root +
        'annotations/validation-annotations-human-imagelabels-boxable.csv',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1000, 600),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=[
                data_root + 'VOC2007/ImageSets/Main/trainval.txt',
                data_root + 'VOC2012/ImageSets/Main/trainval.txt'
            ],
            img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
# dataset settings
dataset_type = 'WIDERFaceDataset'
data_root = 'data/WIDERFace/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(300, 300),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=60,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'train.txt',
            img_prefix=data_root + 'WIDER_train/',
            min_size=17,
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'val.txt',
        img_prefix=data_root + 'WIDER_val/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'val.txt',
        img_prefix=data_root + 'WIDER_val/',
        pipeline=test_pipeline))
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='MlflowLoggerHook',
             exp_name="Edema",
             log_model=True,
             interval=1,
             params={},
             ignore_last=False),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set the multi-process start method to `fork` to speed up training
mp_start_method = 'fork'
# Default setting for scaling the LR automatically
# - `enable`: whether to scale the LR automatically by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
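
The MlflowLoggerHook above is where this PR's MLflow integration plugs in: next to the plain TextLoggerHook, it sends training metrics to the "Edema" experiment every iteration, and log_model=True also records checkpoints. A sketch of reading those runs back with the MLflow client, assuming the mlflow package is installed and MLFLOW_TRACKING_URI points at the same tracking store:

import mlflow

# Runs logged by the hook land under the experiment named in `exp_name`.
experiment = mlflow.get_experiment_by_name('Edema')
runs = mlflow.search_runs(experiment_ids=[experiment.experiment_id])
# search_runs returns a pandas DataFrame: one row per run, with metrics
# exposed as 'metrics.<name>' columns.
print(runs.filter(like='metrics.').columns.tolist())
print(runs[['run_id', 'status', 'start_time']].head())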
# model settings
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='AscendRetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='AscendMaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
# model settings
input_size = 300
model = dict(
    type='SingleStageDetector',
    backbone=dict(
        type='SSDVGG',
        depth=16,
        with_last_pool=False,
        ceil_mode=True,
        out_indices=(3, 4),
        out_feature_indices=(22, 34),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')),
    neck=dict(
        type='SSDNeck',
        in_channels=(512, 1024),
        out_channels=(512, 1024, 512, 256, 256, 256),
        level_strides=(2, 2, 1, 1),
        level_paddings=(1, 1, 0, 0),
        l2_norm_scale=20),
    bbox_head=dict(
        type='AscendSSDHead',
        in_channels=(512, 1024, 512, 256, 256, 256),
        num_classes=80,
        anchor_generator=dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            input_size=input_size,
            basesize_ratio_range=(0.15, 0.9),
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2])),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='AscendMaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        smoothl1_beta=1.,
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        nms=dict(type='nms', iou_threshold=0.45),
        min_bbox_size=0,
        score_thr=0.02,
        max_per_img=200))
cudnn_benchmark = True
# model settings
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='CascadeRoIHead',
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
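
A sketch of instantiating the model defined above through the registry, assuming the dict is saved under the standard configs/_base_/models/ path of an mmdet 2.x tree with mmcv-full installed:

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/_base_/models/cascade_mask_rcnn_r50_fpn.py')
detector = build_detector(cfg.model)  # train_cfg/test_cfg are nested in `model`
# The cascade is visible in the assembled module: one bbox head per stage,
# trained at increasing IoU thresholds (0.5 -> 0.6 -> 0.7).
print(len(detector.roi_head.bbox_head))  # 3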
# model settings
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='CascadeRoIHead',
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ]),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
# model settings
model = dict(
    type='FastRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
    type='FasterRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        shared_head=dict(
            type='ResLayer',
            depth=50,
            stage=3,
            stride=2,
            dilation=1,
            style='caffe',
            norm_cfg=norm_cfg,
            norm_eval=True,
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://detectron2/resnet50_caffe')),
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=1024,
            featmap_strides=[16]),
        bbox_head=dict(
            type='BBoxHead',
            with_avg_pool=True,
            roi_feat_size=7,
            in_channels=2048,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=12000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=6000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
    type='FasterRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        strides=(1, 2, 2, 1),
        dilations=(1, 1, 1, 2),
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    rpn_head=dict(
        type='RPNHead',
        in_channels=2048,
        feat_channels=2048,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=2048,
            featmap_strides=[16]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=2048,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=12000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms=dict(type='nms', iou_threshold=0.7),
            nms_pre=6000,
            max_per_img=1000,
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
# model settings
model = dict(
    type='FasterRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        # soft-nms is also supported for rcnn testing
        # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
    ))
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
    type='MaskRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        shared_head=dict(
            type='ResLayer',
            depth=50,
            stage=3,
            stride=2,
            dilation=1,
            style='caffe',
            norm_cfg=norm_cfg,
            norm_eval=True),
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=1024,
            featmap_strides=[16]),
        bbox_head=dict(
            type='BBoxHead',
            with_avg_pool=True,
            roi_feat_size=7,
            in_channels=2048,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=None,
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=0,
            in_channels=2048,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=12000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=14,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=6000,
            nms=dict(type='nms', iou_threshold=0.7),
            max_per_img=1000,
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
# model settings
model = dict(
    type='MaskRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
# model settings
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
# model settings
model = dict(
    type='RPN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=None,
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=12000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
# model settings
model = dict(
    type='RPN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
# model settings
input_size = 300
model = dict(
    type='SingleStageDetector',
    backbone=dict(
        type='SSDVGG',
        depth=16,
        with_last_pool=False,
        ceil_mode=True,
        out_indices=(3, 4),
        out_feature_indices=(22, 34),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')),
    neck=dict(
        type='SSDNeck',
        in_channels=(512, 1024),
        out_channels=(512, 1024, 512, 256, 256, 256),
        level_strides=(2, 2, 1, 1),
        level_paddings=(1, 1, 0, 0),
        l2_norm_scale=20),
    bbox_head=dict(
        type='SSDHead',
        in_channels=(512, 1024, 512, 256, 256, 256),
        num_classes=80,
        anchor_generator=dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            input_size=input_size,
            basesize_ratio_range=(0.15, 0.9),
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2])),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        smoothl1_beta=1.,
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        nms=dict(type='nms', iou_threshold=0.45),
        min_bbox_size=0,
        score_thr=0.02,
        max_per_img=200))
cudnn_benchmark = True
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

    Albu Example

    Albumentations: fast and flexible image augmentations

    Abstract

Data augmentation is a commonly used technique for increasing both the size and the diversity of labeled training sets by leveraging input transformations that preserve output labels. In the computer vision domain, image augmentations have become a common implicit regularization technique to combat overfitting in deep convolutional neural networks and are ubiquitously used to improve performance. While most deep learning frameworks implement basic image transformations, the list is typically limited to some variations and combinations of flipping, rotating, scaling, and cropping. Moreover, the image processing speed varies among existing image augmentation tools. We present Albumentations, a fast and flexible library for image augmentations with a wide variety of image transform operations available, which is also an easy-to-use wrapper around other augmentation libraries. We provide examples of image augmentations for different computer vision tasks and show that Albumentations is faster than other commonly used image augmentation tools on most of the commonly used image transformations.
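
    For orientation, a minimal standalone Albumentations pipeline is sketched below, mirroring two of the transforms used in the mmdetection config further down (a sketch assuming the albumentations and opencv-python packages are installed; the image path is a placeholder):

    # Sketch: standalone Albumentations usage ('example.jpg' is a placeholder).
    import albumentations as A
    import cv2

    transform = A.Compose([
        A.ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.0, rotate_limit=0,
            interpolation=1, p=0.5),
        A.RandomBrightnessContrast(
            brightness_limit=0.3, contrast_limit=0.3, p=0.2),
    ])

    image = cv2.imread('example.jpg')
    augmented = transform(image=image)['image']  # augmented numpy image

    The mmdetection config below wraps the same transform dicts through the Albu pipeline step instead of calling Albumentations directly.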

    Results and Models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R-50 | pytorch | 1x | 4.4 | 16.6 | 38.0 | 34.5 | config | model \| log |

    Citation

    @article{2018arXiv180906839B,
      author = {A. Buslaev, A. Parinov, E. Khvedchenya, V.~I. Iglovikov and A.~A. Kalinin},
      title = "{Albumentations: fast and flexible image augmentations}",
      journal = {ArXiv e-prints},
      eprint = {1809.06839},
      year = 2018
    }
    
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
    dict(
        type='ShiftScaleRotate',
        shift_limit=0.0625,
        scale_limit=0.0,
        rotate_limit=0,
        interpolation=1,
        p=0.5),
    dict(
        type='RandomBrightnessContrast',
        brightness_limit=[0.1, 0.3],
        contrast_limit=[0.1, 0.3],
        p=0.2),
    dict(
        type='OneOf',
        transforms=[
            dict(
                type='RGBShift',
                r_shift_limit=10,
                g_shift_limit=10,
                b_shift_limit=10,
                p=1.0),
            dict(
                type='HueSaturationValue',
                hue_shift_limit=20,
                sat_shift_limit=30,
                val_shift_limit=20,
                p=1.0)
        ],
        p=0.1),
    dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
    dict(type='ChannelShuffle', p=0.1),
    dict(
        type='OneOf',
        transforms=[
            dict(type='Blur', blur_limit=3, p=1.0),
            dict(type='MedianBlur', blur_limit=3, p=1.0)
        ],
        p=0.1),
]
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='Pad', size_divisor=32),
    dict(
        type='Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_labels'],
            min_visibility=0.0,
            filter_lost_elements=True),
        keymap={
            'img': 'image',
            'gt_masks': 'masks',
            'gt_bboxes': 'bboxes'
        },
        update_pad_shape=False,
        skip_img_without_anno=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'],
        meta_keys=('filename', 'ori_shape', 'img_shape', 'img_norm_cfg',
                   'pad_shape', 'scale_factor'))
]
data = dict(train=dict(pipeline=train_pipeline))

    ATSS

    Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection

    Abstract

Object detection has been dominated by anchor-based detectors for several years. Recently, anchor-free detectors have become popular due to the proposal of FPN and Focal Loss. In this paper, we first point out that the essential difference between anchor-based and anchor-free detection is actually how to define positive and negative training samples, which leads to the performance gap between them. If they adopt the same definition of positive and negative samples during training, there is no obvious difference in the final performance, no matter whether regressing from a box or a point. This shows that how to select positive and negative training samples is important for current object detectors. Then, we propose an Adaptive Training Sample Selection (ATSS) to automatically select positive and negative samples according to the statistical characteristics of objects. It significantly improves the performance of anchor-based and anchor-free detectors and bridges the gap between them. Finally, we discuss the necessity of tiling multiple anchors per location on the image to detect objects. Extensive experiments conducted on MS COCO support our aforementioned analysis and conclusions. With the newly introduced ATSS, we improve state-of-the-art detectors by a large margin to 50.7% AP without introducing any overhead.
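
    The adaptive rule at the heart of ATSS is simple to state: for each ground-truth box, gather the top-k center-closest anchors per pyramid level as candidates, then set that box's positive IoU threshold to the mean plus the standard deviation of the candidates' IoUs. A toy NumPy sketch of that rule (illustrative only; the real ATSSAssigner used in the config below additionally requires anchor centers to lie inside the ground-truth box):

    import numpy as np

    def atss_positive_mask(ious, candidate_idxs):
        """Toy ATSS selection for a single ground-truth box.

        ious: (num_anchors,) IoU of each anchor with this GT box.
        candidate_idxs: int array of the top-k centre-closest anchors
            per pyramid level.
        """
        cand = ious[candidate_idxs]
        # Adaptive threshold: mean + std of the candidates' IoUs.
        thr = cand.mean() + cand.std()
        pos = np.zeros(ious.shape, dtype=bool)
        pos[candidate_idxs[cand >= thr]] = True
        return pos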

    Results and Models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| R-50 | pytorch | 1x | 3.7 | 19.7 | 39.4 | config | model \| log |
| R-101 | pytorch | 1x | 5.6 | 12.3 | 41.5 | config | model \| log |

    Citation

    @article{zhang2019bridging,
      title   =  {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection},
      author  =  {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.},
      journal =  {arXiv preprint arXiv:1912.02424},
      year    =  {2019}
    }
    
_base_ = './atss_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='ATSS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
Collections:
  - Name: ATSS
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - ATSS
        - FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1912.02424
      Title: 'Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection'
    README: configs/atss/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/atss.py#L6
      Version: v2.0.0

Models:
  - Name: atss_r50_fpn_1x_coco
    In Collection: ATSS
    Config: configs/atss/atss_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.7
      inference time (ms/im):
        - value: 50.76
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth
  - Name: atss_r101_fpn_1x_coco
    In Collection: ATSS
    Config: configs/atss/atss_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.6
      inference time (ms/im):
        - value: 81.3
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth

    AutoAssign

    AutoAssign: Differentiable Label Assignment for Dense Object Detection

    Abstract

Determining positive/negative samples for object detection is known as label assignment. Here we present an anchor-free detector named AutoAssign. It requires little human knowledge and achieves appearance-aware label assignment through a fully differentiable weighting mechanism. During training, to both satisfy the prior distribution of the data and adapt to category characteristics, we present Center Weighting to adjust the category-specific prior distributions. To adapt to object appearances, Confidence Weighting is proposed to adjust the specific assignment strategy of each instance. The two weighting modules are then combined to generate positive and negative weights to adjust each location's confidence. Extensive experiments on MS COCO show that our method steadily surpasses the other best sampling strategies by large margins with various backbones. Moreover, our best model achieves 52.1% AP, outperforming all existing one-stage detectors. Besides, experiments on other datasets, e.g., PASCAL VOC, Objects365, and WiderFace, demonstrate the broad applicability of AutoAssign.

    Results and Models

| Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download |
| :------: | :---: | :-----: | :------: | :----: | :----: | :------: |
| R-50 | caffe | 1x | 4.08 | 40.4 | config | model \| log |

    Note:

1. We find that the performance is unstable with the 1x setting and may fluctuate by about 0.3 mAP; results in the 40.3–40.6 mAP range are acceptable. Such fluctuation can also be found in the original implementation.
2. You can get more stable results (~40.6 mAP) with a 13-epoch schedule in which the learning rate is divided by 10 at the 10th and 13th epochs, as sketched below.
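
    A minimal sketch of that longer schedule, assuming the step-policy lr_config conventions of the base config named in the metafile below (the override merges with the base lr_config, keeping its warmup settings):

    # Sketch: 13-epoch AutoAssign schedule per note 2 above; the lr is
    # divided by 10 at epochs 10 and 13.
    _base_ = './autoassign_r50_fpn_8x2_1x_coco.py'
    lr_config = dict(step=[10, 13])
    total_epochs = 13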

    Citation

    @article{zhu2020autoassign,
      title={AutoAssign: Differentiable Label Assignment for Dense Object Detection},
      author={Zhu, Benjin and Wang, Jianfeng and Jiang, Zhengkai and Zong, Fuhang and Liu, Songtao and Li, Zeming and Sun, Jian},
      journal={arXiv preprint arXiv:2007.03496},
      year={2020}
    }
    
# We follow the original implementation which
# adopts the Caffe pre-trained backbone.
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='AutoAssign',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5,
        relu_before_extra_convs=True,
        init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')),
    bbox_head=dict(
        type='AutoAssignHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        loss_bbox=dict(type='GIoULoss', loss_weight=5.0)),
    train_cfg=None,
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
img_norm_cfg = dict(
    mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(lr=0.01, paramwise_cfg=dict(norm_decay_mult=0.))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=1.0 / 1000,
    step=[8, 11])
total_epochs = 12
Collections:
  - Name: AutoAssign
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - AutoAssign
        - FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/2007.03496
      Title: 'AutoAssign: Differentiable Label Assignment for Dense Object Detection'
    README: configs/autoassign/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/autoassign.py#L6
      Version: v2.12.0

Models:
  - Name: autoassign_r50_fpn_8x2_1x_coco
    In Collection: AutoAssign
    Config: configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py
    Metadata:
      Training Memory (GB): 4.08
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth

    CARAFE

    CARAFE: Content-Aware ReAssembly of FEatures

    Abstract

Feature upsampling is a key operation in a number of modern convolutional network architectures, e.g. feature pyramids. Its design is critical for dense prediction tasks such as object detection and semantic/instance segmentation. In this work, we propose Content-Aware ReAssembly of FEatures (CARAFE), a universal, lightweight and highly effective operator to fulfill this goal. CARAFE has several appealing properties: (1) Large field of view. Unlike previous works (e.g. bilinear interpolation) that only exploit sub-pixel neighborhoods, CARAFE can aggregate contextual information within a large receptive field. (2) Content-aware handling. Instead of using a fixed kernel for all samples (e.g. deconvolution), CARAFE enables instance-specific content-aware handling, which generates adaptive kernels on-the-fly. (3) Lightweight and fast to compute. CARAFE introduces little computational overhead and can be readily integrated into modern network architectures. We conduct comprehensive evaluations on standard benchmarks in object detection, instance/semantic segmentation and inpainting. CARAFE shows consistent and substantial gains across all the tasks (1.2%, 1.3%, 1.8%, 1.1 dB respectively) with negligible computational overhead. It has great potential to serve as a strong building block for future research.
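
    The operator itself ships with mmcv; a minimal usage sketch is below, assuming mmcv-full with compiled CUDA ops is installed and that CARAFEPack accepts these keyword arguments (they mirror the upsample_cfg entries in the configs that follow):

    import torch
    from mmcv.ops import CARAFEPack  # assumes mmcv-full with compiled ops

    # Content-aware 2x upsampling of a feature map; the kwargs mirror
    # upsample_cfg in the CARAFE configs below.
    upsampler = CARAFEPack(
        channels=256,
        scale_factor=2,
        up_kernel=5,
        up_group=1,
        encoder_kernel=3,
        encoder_dilation=1,
        compressed_channels=64).cuda()
    feat = torch.randn(1, 256, 32, 32, device='cuda')
    out = upsampler(feat)  # expected shape: (1, 256, 64, 64)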

    Results and Models

The results on COCO 2017 val are shown in the table below.

| Method | Backbone | Style | Lr schd | Test Proposal Num | Inf time (fps) | Box AP | Mask AP | Config | Download |
| :----: | :------: | :---: | :-----: | :---------------: | :------------: | :----: | :-----: | :----: | :------: |
| Faster R-CNN w/ CARAFE | R-50-FPN | pytorch | 1x | 1000 | 16.5 | 38.6 | 38.6 | config | model \| log |
| - | - | - | - | 2000 | - | - | - | - | - |
| Mask R-CNN w/ CARAFE | R-50-FPN | pytorch | 1x | 1000 | 14.0 | 39.3 | 35.8 | config | model \| log |
| - | - | - | - | 2000 | - | - | - | - | - |

    Implementation

The CUDA implementation of CARAFE can be found at https://github.com/myownskyW7/CARAFE.

    Citation

    We provide config files to reproduce the object detection & instance segmentation results in the ICCV 2019 Oral paper for CARAFE: Content-Aware ReAssembly of FEatures.

    @inproceedings{Wang_2019_ICCV,
        title = {CARAFE: Content-Aware ReAssembly of FEatures},
        author = {Wang, Jiaqi and Chen, Kai and Xu, Rui and Liu, Ziwei and Loy, Chen Change and Lin, Dahua},
        booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
        month = {October},
        year = {2019}
    }
    
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    neck=dict(
        type='FPN_CARAFE',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        start_level=0,
        end_level=-1,
        norm_cfg=None,
        act_cfg=None,
        order=('conv', 'norm', 'act'),
        upsample_cfg=dict(
            type='carafe',
            up_kernel=5,
            up_group=1,
            encoder_kernel=3,
            encoder_dilation=1,
            compressed_channels=64)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=64),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    neck=dict(
        type='FPN_CARAFE',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        start_level=0,
        end_level=-1,
        norm_cfg=None,
        act_cfg=None,
        order=('conv', 'norm', 'act'),
        upsample_cfg=dict(
            type='carafe',
            up_kernel=5,
            up_group=1,
            encoder_kernel=3,
            encoder_dilation=1,
            compressed_channels=64)),
    roi_head=dict(
        mask_head=dict(
            upsample_cfg=dict(
                type='carafe',
                scale_factor=2,
                up_kernel=5,
                up_group=1,
                encoder_kernel=3,
                encoder_dilation=1,
                compressed_channels=64))))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=64),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
Collections:
  - Name: CARAFE
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RPN
        - FPN_CARAFE
        - ResNet
        - RoIPool
    Paper:
      URL: https://arxiv.org/abs/1905.02188
      Title: 'CARAFE: Content-Aware ReAssembly of FEatures'
    README: configs/carafe/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/necks/fpn_carafe.py#L11
      Version: v2.12.0

Models:
  - Name: faster_rcnn_r50_fpn_carafe_1x_coco
    In Collection: CARAFE
    Config: configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py
    Metadata:
      Training Memory (GB): 4.26
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth
  - Name: mask_rcnn_r50_fpn_carafe_1x_coco
    In Collection: CARAFE
    Config: configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py
    Metadata:
      Training Memory (GB): 4.31
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 35.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth

    Cascade R-CNN

    Cascade R-CNN: High Quality Object Detection and Instance Segmentation

    Abstract

In object detection, the intersection over union (IoU) threshold is frequently used to define positives/negatives. The threshold used to train a detector defines its quality. While the commonly used threshold of 0.5 leads to noisy (low-quality) detections, detection performance frequently degrades for larger thresholds. This paradox of high-quality detection has two causes: 1) overfitting, due to vanishing positive samples for large thresholds, and 2) inference-time quality mismatch between detector and test hypotheses. A multi-stage object detection architecture, the Cascade R-CNN, composed of a sequence of detectors trained with increasing IoU thresholds, is proposed to address these problems. The detectors are trained sequentially, using the output of one detector as the training set for the next. This resampling progressively improves hypothesis quality, guaranteeing a positive training set of equivalent size for all detectors and minimizing overfitting. The same cascade is applied at inference, to eliminate quality mismatches between hypotheses and detectors. An implementation of the Cascade R-CNN without bells or whistles achieves state-of-the-art performance on the COCO dataset, and significantly improves high-quality detection on generic and specific object detection datasets, including VOC, KITTI, CityPerson, and WiderFace. Finally, the Cascade R-CNN is generalized to instance segmentation, with nontrivial improvements over the Mask R-CNN.
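
    In config terms, the "sequence of detectors trained with increasing IoU thresholds" becomes a list-valued rcnn entry in train_cfg, one dict per cascade stage. A condensed sketch following the MaxIoUAssigner/RandomSampler pattern of the configs above (the stock Cascade R-CNN configs write the three stage dicts out explicitly; here a comprehension is used for brevity, and only the IoU thresholds vary across stages):

    # Sketch: stage-wise RCNN training settings for a 3-stage cascade,
    # with the IoU threshold raised from 0.5 to 0.6 to 0.7 across stages.
    train_cfg = dict(
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=iou,
                    neg_iou_thr=iou,
                    min_pos_iou=iou,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                pos_weight=-1,
                debug=False) for iou in (0.5, 0.6, 0.7)
        ])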

    Results and Models

    Cascade R-CNN

    Backbone          Style    Lr schd  Mem (GB)  Inf time (fps)  box AP  Config  Download
    R-50-FPN          caffe    1x       4.2       -               40.4    config  model | log
    R-50-FPN          pytorch  1x       4.4       16.1            40.3    config  model | log
    R-50-FPN          pytorch  20e      -         -               41.0    config  model | log
    R-101-FPN         caffe    1x       6.2       -               42.3    config  model | log
    R-101-FPN         pytorch  1x       6.4       13.5            42.0    config  model | log
    R-101-FPN         pytorch  20e      -         -               42.5    config  model | log
    X-101-32x4d-FPN   pytorch  1x       7.6       10.9            43.7    config  model | log
    X-101-32x4d-FPN   pytorch  20e      7.6       -               43.7    config  model | log
    X-101-64x4d-FPN   pytorch  1x       10.7      -               44.7    config  model | log
    X-101-64x4d-FPN   pytorch  20e      10.7      -               44.5    config  model | log

    Cascade Mask R-CNN

    Backbone          Style    Lr schd  Mem (GB)  Inf time (fps)  box AP  mask AP  Config  Download
    R-50-FPN          caffe    1x       5.9       -               41.2    36.0     config  model | log
    R-50-FPN          pytorch  1x       6.0       11.2            41.2    35.9     config  model | log
    R-50-FPN          pytorch  20e      -         -               41.9    36.5     config  model | log
    R-101-FPN         caffe    1x       7.8       -               43.2    37.6     config  model | log
    R-101-FPN         pytorch  1x       7.9       9.8             42.9    37.3     config  model | log
    R-101-FPN         pytorch  20e      -         -               43.4    37.8     config  model | log
    X-101-32x4d-FPN   pytorch  1x       9.2       8.6             44.3    38.3     config  model | log
    X-101-32x4d-FPN   pytorch  20e      9.2       -               45.0    39.0     config  model | log
    X-101-64x4d-FPN   pytorch  1x       12.2      6.7             45.3    39.2     config  model | log
    X-101-64x4d-FPN   pytorch  20e      12.2      -               45.6    39.5     config  model | log

    Notes:

    • The 20e schedule in Cascade (Mask) R-CNN means the learning rate is decreased after epochs 16 and 19, for a total of 20 epochs; see the sketch below.
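
    For reference, the 20e schedule corresponds to a step LR policy along the following lines (cf. configs/_base_/schedules/schedule_20e.py; warmup values shown are the 2.x defaults):

    # Step decay after epochs 16 and 19; training stops at epoch 20.
    lr_config = dict(
        policy='step',
        warmup='linear',
        warmup_iters=500,
        warmup_ratio=0.001,
        step=[16, 19])
    runner = dict(type='EpochBasedRunner', max_epochs=20)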

    Pre-trained Models

    We also provide Cascade Mask R-CNN models trained with longer schedules and multi-scale training. Users can fine-tune them for downstream tasks; a sketch follows the table.

    Backbone          Style    Lr schd  Mem (GB)  Inf time (fps)  box AP  mask AP  Config  Download
    R-50-FPN          caffe    3x       5.7       -               44.0    38.1     config  model | log
    R-50-FPN          pytorch  3x       5.9       -               44.3    38.5     config  model | log
    R-101-FPN         caffe    3x       7.7       -               45.4    39.5     config  model | log
    R-101-FPN         pytorch  3x       7.8       -               45.5    39.6     config  model | log
    X-101-32x4d-FPN   pytorch  3x       9.0       -               46.3    40.1     config  model | log
    X-101-32x8d-FPN   pytorch  3x       12.1      -               46.1    39.9     config  model | log
    X-101-64x4d-FPN   pytorch  3x       12.0      -               46.6    40.3     config  model | log
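
    As a minimal sketch of such fine-tuning (the checkpoint path and learning rate below are placeholders, not part of this repo), a downstream config can inherit one of the 3x configs and warm-start from the released weights:

    # Hypothetical downstream config: inherit the 3x recipe and initialize
    # from a released checkpoint (download it from the table above first).
    _base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
    load_from = 'checkpoints/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.pth'  # placeholder path
    optimizer = dict(lr=0.0025)  # illustrative: fine-tuning usually uses a lower lr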

    Citation

    @article{Cai_2019,
       title={Cascade R-CNN: High Quality Object Detection and Instance Segmentation},
       ISSN={1939-3539},
       url={http://dx.doi.org/10.1109/tpami.2019.2956516},
       DOI={10.1109/tpami.2019.2956516},
       journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
       publisher={Institute of Electrical and Electronics Engineers (IEEE)},
       author={Cai, Zhaowei and Vasconcelos, Nuno},
       year={2019},
       pages={1–1}
    }
    
    # R-101 (caffe) variant: inherit the R-50 caffe 1x config, changing only
    # the backbone depth and the pretrained checkpoint.
    _base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'
    model = dict(
        backbone=dict(
            depth=101,
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://detectron2/resnet101_caffe')))
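
    The override above relies on config inheritance: mmcv merges the child dict into the _base_ file, so only the changed keys need to be listed. A quick way to verify the merged result (assuming this file is saved as configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py and is loaded from the repo root):

    from mmcv import Config

    # Load the child config; mmcv resolves _base_ and applies the overrides.
    cfg = Config.fromfile(
        'configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py')
    print(cfg.model.backbone.depth)  # 101, overriding the base's 50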

    # R-101 (caffe) variant of the multi-scale 3x recipe; same backbone swap.
    _base_ = './cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py'
    model = dict(
        backbone=dict(
            depth=101,
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://detectron2/resnet101_caffe')))

    # R-101 (pytorch) variant of the 1x config, using torchvision weights.
    _base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
    model = dict(
        backbone=dict(
            depth=101,
            init_cfg=dict(type='Pretrained',
                          checkpoint='torchvision://resnet101')))

    # R-101 (pytorch) variant of the 20e schedule.
    _base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
    model = dict(
        backbone=dict(
            depth=101,
            init_cfg=dict(type='Pretrained',
                          checkpoint='torchvision://resnet101')))

    # R-101 (pytorch) variant of the multi-scale 3x recipe.
    _base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
    model = dict(
        backbone=dict(
            depth=101,
            init_cfg=dict(type='Pretrained',
                          checkpoint='torchvision://resnet101')))

    # R-50 caffe-style variant of the 1x config: freeze BN statistics, switch
    # the backbone to caffe style, and use detectron2's caffe-pretrained weights.
    _base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
    model = dict(
        backbone=dict(
            norm_cfg=dict(requires_grad=False),
            norm_eval=True,
            style='caffe',
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://detectron2/resnet50_caffe')))
    # caffe-style preprocessing: BGR input, mean subtraction only (std is 1.0).
    img_norm_cfg = dict(
        mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
    train_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
        dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        dict(type='RandomFlip', flip_ratio=0.5),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle'),
        dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
    ]
    test_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug',
            img_scale=(1333, 800),
            flip=False,
            transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='RandomFlip'),
                dict(type='Normalize', **img_norm_cfg),
                dict(type='Pad', size_divisor=32),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img']),
            ])
    ]
    data = dict(
        train=dict(pipeline=train_pipeline),
        val=dict(pipeline=test_pipeline),
        test=dict(pipeline=test_pipeline))

    # R-50 caffe-style variant of the multi-scale 3x recipe.
    _base_ = ['./cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py']
    model = dict(
        backbone=dict(
            norm_cfg=dict(requires_grad=False),
            norm_eval=True,
            style='caffe',
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://detectron2/resnet50_caffe')))
    # use caffe img_norm: BGR input, mean subtraction only.
    img_norm_cfg = dict(
        mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
    # In the mstrain 3x config, img_scale=[(1333, 640), (1333, 800)] with
    # multiscale_mode='range', i.e. the short side is sampled from [640, 800].
    train_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
        dict(
            type='Resize',
            img_scale=[(1333, 640), (1333, 800)],
            multiscale_mode='range',
            keep_ratio=True),
        dict(type='RandomFlip', flip_ratio=0.5),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle'),
        dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
    ]
    test_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug',
            img_scale=(1333, 800),
            flip=False,
            transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='RandomFlip'),
                dict(type='Normalize', **img_norm_cfg),
                dict(type='Pad', size_divisor=32),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img']),
            ])
    ]
    # The 3x base wraps the train set (e.g. in RepeatDataset), hence the
    # nested dataset= level in the override below.
    data = dict(
        train=dict(dataset=dict(pipeline=train_pipeline)),
        val=dict(pipeline=test_pipeline),
        test=dict(pipeline=test_pipeline))

    Some files were not shown because too many files changed in this diff