From 8ff221a5f127ae15858285a54d0b38b197689fc9 Mon Sep 17 00:00:00 2001
From: 黎木林 <762129126@qq.com>
Date: Fri, 7 Apr 2023 13:31:22 +0800
Subject: [PATCH 1/4] [ADD] Add Resnet50_ID3915_for_Pytorch and
 resnext_ID3918_for_Pytorch
---
.../Resnet50_for_PyTorch/.gitignore | 118 ++
.../.pre-commit-config.yaml | 54 +
.../Resnet50_for_PyTorch/.readthedocs.yml | 9 +
.../Resnet50_for_PyTorch/CITATION.cff | 9 +
.../Resnet50_for_PyTorch/LICENSE | 203 +++
.../Resnet50_for_PyTorch/MANIFEST.in | 3 +
.../Resnet50_for_PyTorch/README.md | 104 ++
.../Resnet50_for_PyTorch/README_zh-CN.md | 108 ++
.../configs/_base_/datasets/cifar100_bs16.py | 36 +
.../configs/_base_/datasets/cifar10_bs16.py | 35 +
.../_base_/datasets/imagenet21k_bs128.py | 43 +
.../configs/_base_/datasets/imagenet_bs32.py | 40 +
.../datasets/imagenet_bs32_pil_resize.py | 40 +
.../configs/_base_/datasets/imagenet_bs64.py | 40 +
.../_base_/datasets/imagenet_bs64_autoaug.py | 43 +
.../datasets/imagenet_bs64_pil_resize.py | 40 +
.../imagenet_bs64_pil_resize_autoaug.py | 45 +
.../_base_/datasets/imagenet_bs64_swin_224.py | 71 +
.../_base_/datasets/imagenet_bs64_swin_384.py | 43 +
.../_base_/datasets/imagenet_bs64_t2t_224.py | 71 +
.../_base_/datasets/pipelines/auto_aug.py | 96 ++
.../_base_/datasets/pipelines/rand_aug.py | 43 +
.../configs/_base_/datasets/voc_bs16.py | 41 +
.../configs/_base_/default_runtime.py | 16 +
.../configs/_base_/models/mobilenet_v2_1x.py | 12 +
.../models/mobilenet_v3_large_imagenet.py | 14 +
.../_base_/models/mobilenet_v3_small_cifar.py | 13 +
.../models/mobilenet_v3_small_imagenet.py | 14 +
.../_base_/models/regnet/regnetx_1.6gf.py | 12 +
.../_base_/models/regnet/regnetx_12gf.py | 12 +
.../_base_/models/regnet/regnetx_3.2gf.py | 12 +
.../_base_/models/regnet/regnetx_4.0gf.py | 12 +
.../_base_/models/regnet/regnetx_400mf.py | 12 +
.../_base_/models/regnet/regnetx_6.4gf.py | 12 +
.../_base_/models/regnet/regnetx_8.0gf.py | 12 +
.../_base_/models/regnet/regnetx_800mf.py | 12 +
.../configs/_base_/models/repvgg-A0_in1k.py | 15 +
.../_base_/models/repvgg-B3_lbs-mixup_in1k.py | 23 +
.../_base_/models/res2net101-w26-s4.py | 18 +
.../configs/_base_/models/res2net50-w14-s8.py | 18 +
.../configs/_base_/models/res2net50-w26-s4.py | 18 +
.../configs/_base_/models/res2net50-w26-s6.py | 18 +
.../configs/_base_/models/res2net50-w26-s8.py | 18 +
.../configs/_base_/models/res2net50-w48-s2.py | 18 +
.../configs/_base_/models/resnest101.py | 24 +
.../configs/_base_/models/resnest200.py | 24 +
.../configs/_base_/models/resnest269.py | 24 +
.../configs/_base_/models/resnest50.py | 23 +
.../configs/_base_/models/resnet101.py | 17 +
.../configs/_base_/models/resnet101_cifar.py | 16 +
.../configs/_base_/models/resnet152.py | 17 +
.../configs/_base_/models/resnet152_cifar.py | 16 +
.../configs/_base_/models/resnet18.py | 17 +
.../configs/_base_/models/resnet18_cifar.py | 16 +
.../configs/_base_/models/resnet34.py | 17 +
.../configs/_base_/models/resnet34_cifar.py | 16 +
.../configs/_base_/models/resnet50.py | 17 +
.../configs/_base_/models/resnet50_cifar.py | 16 +
.../_base_/models/resnet50_cifar_cutmix.py | 18 +
.../_base_/models/resnet50_cifar_mixup.py | 17 +
.../configs/_base_/models/resnet50_cutmix.py | 18 +
.../_base_/models/resnet50_label_smooth.py | 18 +
.../configs/_base_/models/resnet50_mixup.py | 18 +
.../configs/_base_/models/resnetv1d101.py | 17 +
.../configs/_base_/models/resnetv1d152.py | 17 +
.../configs/_base_/models/resnetv1d50.py | 17 +
.../configs/_base_/models/resnext101_32x4d.py | 19 +
.../configs/_base_/models/resnext101_32x8d.py | 19 +
.../configs/_base_/models/resnext152_32x4d.py | 19 +
.../configs/_base_/models/resnext50_32x4d.py | 19 +
.../configs/_base_/models/seresnet101.py | 17 +
.../configs/_base_/models/seresnet50.py | 17 +
.../_base_/models/seresnext101_32x4d.py | 20 +
.../_base_/models/seresnext50_32x4d.py | 20 +
.../configs/_base_/models/shufflenet_v1_1x.py | 12 +
.../configs/_base_/models/shufflenet_v2_1x.py | 12 +
.../models/swin_transformer/base_224.py | 22 +
.../models/swin_transformer/base_384.py | 16 +
.../models/swin_transformer/large_224.py | 12 +
.../models/swin_transformer/large_384.py | 16 +
.../models/swin_transformer/small_224.py | 23 +
.../models/swin_transformer/tiny_224.py | 22 +
.../configs/_base_/models/t2t-vit-t-14.py | 41 +
.../configs/_base_/models/t2t-vit-t-19.py | 41 +
.../configs/_base_/models/t2t-vit-t-24.py | 41 +
.../_base_/models/tnt_s_patch16_224.py | 29 +
.../configs/_base_/models/vgg11.py | 10 +
.../configs/_base_/models/vgg11bn.py | 11 +
.../configs/_base_/models/vgg13.py | 10 +
.../configs/_base_/models/vgg13bn.py | 11 +
.../configs/_base_/models/vgg16.py | 10 +
.../configs/_base_/models/vgg16bn.py | 11 +
.../configs/_base_/models/vgg19.py | 10 +
.../configs/_base_/models/vgg19bn.py | 11 +
.../configs/_base_/models/vit-base-p16.py | 25 +
.../configs/_base_/models/vit-base-p32.py | 24 +
.../configs/_base_/models/vit-large-p16.py | 24 +
.../configs/_base_/models/vit-large-p32.py | 24 +
.../configs/_base_/schedules/cifar10_bs128.py | 6 +
.../schedules/imagenet_bs1024_adamw_swin.py | 30 +
.../_base_/schedules/imagenet_bs1024_coslr.py | 12 +
.../imagenet_bs1024_linearlr_bn_nowd.py | 17 +
.../_base_/schedules/imagenet_bs2048.py | 12 +
.../_base_/schedules/imagenet_bs2048_AdamW.py | 20 +
.../_base_/schedules/imagenet_bs2048_coslr.py | 12 +
.../_base_/schedules/imagenet_bs256.py | 6 +
.../_base_/schedules/imagenet_bs256_140e.py | 6 +
.../imagenet_bs256_200e_coslr_warmup.py | 11 +
.../_base_/schedules/imagenet_bs256_coslr.py | 6 +
.../schedules/imagenet_bs256_epochstep.py | 6 +
.../_base_/schedules/imagenet_bs4096_AdamW.py | 18 +
.../configs/fp16/README.md | 20 +
.../configs/fp16/metafile.yml | 35 +
.../resnet50_b32x8_fp16_dynamic_imagenet.py | 4 +
.../fp16/resnet50_b32x8_fp16_imagenet.py | 4 +
.../configs/lenet/README.md | 19 +
.../configs/lenet/lenet5_mnist.py | 59 +
.../configs/mobilenet_v2/README.md | 27 +
.../configs/mobilenet_v2/metafile.yml | 34 +
.../mobilenet_v2_b32x8_imagenet.py | 6 +
.../configs/mobilenet_v3/README.md | 31 +
.../configs/mobilenet_v3/metafile.yml | 42 +
.../mobilenet_v3_large_imagenet.py | 158 +++
.../mobilenet_v3/mobilenet_v3_small_cifar.py | 8 +
.../mobilenet_v3_small_imagenet.py | 158 +++
.../configs/regnet/README.md | 38 +
.../regnet/regnetx_1.6gf_b32x8_imagenet.py | 51 +
.../regnet/regnetx_12gf_b32x8_imagenet.py | 51 +
.../regnet/regnetx_3.2gf_b32x8_imagenet.py | 51 +
.../regnet/regnetx_4.0gf_b32x8_imagenet.py | 51 +
.../regnet/regnetx_400mf_b32x8_imagenet.py | 51 +
.../regnet/regnetx_6.4gf_b32x8_imagenet.py | 51 +
.../regnet/regnetx_8.0gf_b32x8_imagenet.py | 51 +
.../regnet/regnetx_800mf_b32x8_imagenet.py | 51 +
.../configs/repvgg/README.md | 51 +
.../repvgg-A0_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-A1_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-A2_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B0_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B1_deploy_4xb64-coslr-120e_in1k.py | 3 +
...epvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py | 3 +
...epvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B2_deploy_4xb64-coslr-120e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
.../configs/repvgg/metafile.yml | 208 +++
.../repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py | 8 +
.../repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B1g2_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg-B1g4_4xb64-coslr-120e_in1k.py | 3 +
.../repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 6 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 +
.../configs/res2net/README.md | 30 +
.../configs/res2net/metafile.yml | 67 +
.../res2net/res2net101-w26-s4_8xb32_in1k.py | 5 +
.../res2net/res2net50-w14-s8_8xb32_in1k.py | 5 +
.../res2net/res2net50-w26-s8_8xb32_in1k.py | 5 +
.../configs/resnest/README.md | 17 +
.../resnest/resnest101_b64x32_imagenet.py | 181 +++
.../resnest/resnest200_b32x64_imagenet.py | 181 +++
.../resnest/resnest269_b32x64_imagenet.py | 181 +++
.../resnest/resnest50_b64x32_imagenet.py | 181 +++
.../configs/resnet/README.md | 47 +
.../configs/resnet/metafile.yml | 217 +++
.../configs/resnet/resnet101_b16x8_cifar10.py | 5 +
.../resnet/resnet101_b32x8_imagenet.py | 4 +
.../configs/resnet/resnet152_b16x8_cifar10.py | 5 +
.../resnet/resnet152_b32x8_imagenet.py | 4 +
.../configs/resnet/resnet18_b16x8_cifar10.py | 4 +
.../configs/resnet/resnet18_b32x8_imagenet.py | 4 +
.../configs/resnet/resnet34_b16x8_cifar10.py | 4 +
.../configs/resnet/resnet34_b32x8_imagenet.py | 4 +
.../resnet/resnet50_8xb128_coslr-90e_in21k.py | 11 +
.../configs/resnet/resnet50_b16x8_cifar10.py | 4 +
.../configs/resnet/resnet50_b16x8_cifar100.py | 10 +
.../resnet/resnet50_b16x8_cifar10_mixup.py | 5 +
.../resnet/resnet50_b32x8_coslr_imagenet.py | 5 +
.../resnet/resnet50_b32x8_cutmix_imagenet.py | 5 +
.../configs/resnet/resnet50_b32x8_imagenet.py | 4 +
.../resnet50_b32x8_label_smooth_imagenet.py | 5 +
.../resnet/resnet50_b32x8_mixup_imagenet.py | 5 +
.../resnet50_b64x32_warmup_coslr_imagenet.py | 5 +
.../resnet/resnet50_b64x32_warmup_imagenet.py | 4 +
...t50_b64x32_warmup_label_smooth_imagenet.py | 12 +
.../resnet/resnetv1d101_b32x8_imagenet.py | 5 +
.../resnet/resnetv1d152_b32x8_imagenet.py | 5 +
.../resnet/resnetv1d50_b32x8_imagenet.py | 5 +
.../configs/resnext/README.md | 27 +
.../configs/resnext/metafile.yml | 73 +
.../resnext101_32x4d_b32x8_imagenet.py | 5 +
.../resnext101_32x8d_b32x8_imagenet.py | 5 +
.../resnext152_32x4d_b32x8_imagenet.py | 5 +
.../resnext/resnext50_32x4d_b32x8_imagenet.py | 5 +
.../configs/seresnet/README.md | 25 +
.../configs/seresnet/metafile.yml | 47 +
.../seresnet/seresnet101_b32x8_imagenet.py | 5 +
.../seresnet/seresnet50_b32x8_imagenet.py | 6 +
.../configs/seresnext/README.md | 16 +
.../seresnext101_32x4d_b32x8_imagenet.py | 5 +
.../seresnext50_32x4d_b32x8_imagenet.py | 5 +
.../configs/shufflenet_v1/README.md | 24 +
.../configs/shufflenet_v1/metafile.yml | 35 +
..._v1_1x_b64x16_linearlr_bn_nowd_imagenet.py | 6 +
.../configs/shufflenet_v2/README.md | 24 +
.../configs/shufflenet_v2/metafile.yml | 35 +
..._v2_1x_b64x16_linearlr_bn_nowd_imagenet.py | 6 +
.../configs/swin_transformer/README.md | 42 +
.../configs/swin_transformer/metafile.yml | 188 +++
.../swin_base_224_b16x64_300e_imagenet.py | 6 +
.../swin_base_384_evalonly_imagenet.py | 7 +
.../swin_large_224_evalonly_imagenet.py | 7 +
.../swin_large_384_evalonly_imagenet.py | 7 +
.../swin_small_224_b16x64_300e_imagenet.py | 6 +
.../swin_tiny_224_b16x64_300e_imagenet.py | 6 +
.../configs/t2t_vit/README.md | 33 +
.../configs/t2t_vit/metafile.yml | 64 +
.../t2t_vit/t2t-vit-t-14_8xb64_in1k.py | 31 +
.../t2t_vit/t2t-vit-t-19_8xb64_in1k.py | 31 +
.../t2t_vit/t2t-vit-t-24_8xb64_in1k.py | 31 +
.../configs/tnt/README.md | 32 +
.../configs/tnt/metafile.yml | 29 +
.../tnt_s_patch16_224_evalonly_imagenet.py | 39 +
.../configs/vgg/README.md | 31 +
.../configs/vgg/metafile.yml | 125 ++
.../configs/vgg/vgg11_b32x8_imagenet.py | 7 +
.../configs/vgg/vgg11bn_b32x8_imagenet.py | 5 +
.../configs/vgg/vgg13_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg13bn_b32x8_imagenet.py | 5 +
.../configs/vgg/vgg16_b16x8_voc.py | 25 +
.../configs/vgg/vgg16_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg16bn_b32x8_imagenet.py | 5 +
.../configs/vgg/vgg19_b32x8_imagenet.py | 6 +
.../configs/vgg/vgg19bn_b32x8_imagenet.py | 5 +
.../configs/vision_transformer/README.md | 51 +
.../configs/vision_transformer/metafile.yml | 76 +
.../vit-base-p16_ft-64xb64_in1k-384.py | 36 +
.../vit-base-p16_pt-64xb64_in1k-224.py | 12 +
.../vit-base-p32_ft-64xb64_in1k-384.py | 36 +
.../vit-base-p32_pt-64xb64_in1k-224.py | 12 +
.../vit-large-p16_ft-64xb64_in1k-384.py | 36 +
.../vit-large-p16_pt-64xb64_in1k-224.py | 12 +
.../vit-large-p32_ft-64xb64_in1k-384.py | 37 +
.../vit-large-p32_pt-64xb64_in1k-224.py | 12 +
.../Resnet50_for_PyTorch/demo/demo.JPEG | Bin 0 -> 109527 bytes
.../Resnet50_for_PyTorch/demo/image_demo.py | 25 +
.../Resnet50_for_PyTorch/docker/Dockerfile | 22 +
.../docker/serve/Dockerfile | 49 +
.../docker/serve/config.properties | 5 +
.../docker/serve/entrypoint.sh | 12 +
.../Resnet50_for_PyTorch/docs/Makefile | 20 +
.../docs/_static/css/readthedocs.css | 16 +
.../docs/_static/image/concat.JPEG | Bin 0 -> 45505 bytes
.../docs/_static/image/original.JPEG | Bin 0 -> 9414 bytes
.../docs/_static/image/pipeline.JPEG | Bin 0 -> 19054 bytes
.../docs/_static/js/custom.js | 1 +
.../Resnet50_for_PyTorch/docs/changelog.md | 403 ++++++
.../docs/community/CONTRIBUTING.md | 71 +
.../Resnet50_for_PyTorch/docs/conf.py | 297 ++++
.../docs/getting_started.md | 232 +++
.../Resnet50_for_PyTorch/docs/install.md | 142 ++
.../Resnet50_for_PyTorch/docs/model_zoo.md | 75 +
.../Resnet50_for_PyTorch/docs/stat.py | 107 ++
.../docs/tools/model_serving.md | 87 ++
.../docs/tools/onnx2tensorrt.md | 80 ++
.../docs/tools/pytorch2onnx.md | 204 +++
.../docs/tools/pytorch2torchscript.md | 56 +
.../docs/tools/visualization.md | 81 ++
.../docs/tutorials/config.md | 403 ++++++
.../docs/tutorials/data_pipeline.md | 148 ++
.../docs/tutorials/finetune.md | 237 ++++
.../docs/tutorials/new_dataset.md | 141 ++
.../docs/tutorials/new_modules.md | 272 ++++
.../Resnet50_for_PyTorch/docs_zh-CN/Makefile | 20 +
.../docs_zh-CN/_static/css/readthedocs.css | 16 +
.../docs_zh-CN/_static/image/concat.JPEG | Bin 0 -> 45505 bytes
.../docs_zh-CN/_static/image/original.JPEG | Bin 0 -> 9414 bytes
.../docs_zh-CN/_static/image/pipeline.JPEG | Bin 0 -> 19054 bytes
.../docs_zh-CN/community/CONTRIBUTING.md | 73 +
.../Resnet50_for_PyTorch/docs_zh-CN/conf.py | 284 ++++
.../docs_zh-CN/getting_started.md | 228 +++
.../docs_zh-CN/install.md | 134 ++
.../docs_zh-CN/model_zoo.md | 1 +
.../Resnet50_for_PyTorch/docs_zh-CN/stat.py | 107 ++
.../docs_zh-CN/tools/model_serving.md | 87 ++
.../docs_zh-CN/tools/onnx2tensorrt.md | 76 +
.../docs_zh-CN/tools/pytorch2onnx.md | 89 ++
.../docs_zh-CN/tools/pytorch2torchscript.md | 55 +
.../docs_zh-CN/tools/visualization.md | 82 ++
.../docs_zh-CN/tutorials/config.md | 405 ++++++
.../docs_zh-CN/tutorials/data_pipeline.md | 148 ++
.../docs_zh-CN/tutorials/finetune.md | 222 +++
.../docs_zh-CN/tutorials/new_dataset.md | 140 ++
.../docs_zh-CN/tutorials/new_modules.md | 281 ++++
.../Resnet50_for_PyTorch/mmcls/__init__.py | 60 +
.../mmcls/apis/__init__.py | 9 +
.../mmcls/apis/inference.py | 119 ++
.../Resnet50_for_PyTorch/mmcls/apis/test.py | 198 +++
.../Resnet50_for_PyTorch/mmcls/apis/train.py | 177 +++
.../mmcls/core/__init__.py | 4 +
.../mmcls/core/evaluation/__init__.py | 12 +
.../mmcls/core/evaluation/eval_hooks.py | 107 ++
.../mmcls/core/evaluation/eval_metrics.py | 248 ++++
.../mmcls/core/evaluation/mean_ap.py | 74 +
.../evaluation/multilabel_eval_metrics.py | 72 +
.../mmcls/core/export/__init__.py | 4 +
.../mmcls/core/export/test.py | 96 ++
.../mmcls/core/fp16/__init__.py | 5 +
.../mmcls/core/fp16/decorators.py | 161 +++
.../mmcls/core/fp16/hooks.py | 129 ++
.../mmcls/core/fp16/utils.py | 24 +
.../mmcls/core/utils/__init__.py | 5 +
.../mmcls/core/utils/dist_utils.py | 57 +
.../mmcls/core/utils/misc.py | 8 +
.../mmcls/core/visualization/__init__.py | 7 +
.../mmcls/core/visualization/image.py | 326 +++++
.../mmcls/datasets/__init__.py | 19 +
.../mmcls/datasets/base_dataset.py | 206 +++
.../mmcls/datasets/builder.py | 122 ++
.../mmcls/datasets/cifar.py | 133 ++
.../mmcls/datasets/dataset_wrappers.py | 172 +++
.../mmcls/datasets/imagenet.py | 1103 +++++++++++++++
.../mmcls/datasets/imagenet21k.py | 141 ++
.../mmcls/datasets/mnist.py | 185 +++
.../mmcls/datasets/multi_label.py | 83 ++
.../mmcls/datasets/pipelines/__init__.py | 22 +
.../mmcls/datasets/pipelines/auto_augment.py | 921 ++++++++++++
.../mmcls/datasets/pipelines/compose.py | 43 +
.../mmcls/datasets/pipelines/formating.py | 9 +
.../mmcls/datasets/pipelines/formatting.py | 180 +++
.../mmcls/datasets/pipelines/loading.py | 70 +
.../mmcls/datasets/pipelines/transforms.py | 1065 ++++++++++++++
.../mmcls/datasets/samplers/__init__.py | 4 +
.../datasets/samplers/distributed_sampler.py | 43 +
.../mmcls/datasets/utils.py | 153 ++
.../mmcls/datasets/voc.py | 69 +
.../mmcls/models/__init__.py | 14 +
.../mmcls/models/backbones/__init__.py | 29 +
.../mmcls/models/backbones/alexnet.py | 56 +
.../mmcls/models/backbones/base_backbone.py | 33 +
.../mmcls/models/backbones/lenet.py | 42 +
.../mmcls/models/backbones/mobilenet_v2.py | 264 ++++
.../mmcls/models/backbones/mobilenet_v3.py | 195 +++
.../mmcls/models/backbones/regnet.py | 312 +++++
.../mmcls/models/backbones/repvgg.py | 537 +++++++
.../mmcls/models/backbones/res2net.py | 306 ++++
.../mmcls/models/backbones/resnest.py | 339 +++++
.../mmcls/models/backbones/resnet.py | 651 +++++++++
.../mmcls/models/backbones/resnet_cifar.py | 81 ++
.../mmcls/models/backbones/resnext.py | 148 ++
.../mmcls/models/backbones/seresnet.py | 125 ++
.../mmcls/models/backbones/seresnext.py | 155 ++
.../mmcls/models/backbones/shufflenet_v1.py | 321 +++++
.../mmcls/models/backbones/shufflenet_v2.py | 297 ++++
.../models/backbones/swin_transformer.py | 401 ++++++
.../mmcls/models/backbones/t2t_vit.py | 367 +++++
.../mmcls/models/backbones/timm_backbone.py | 57 +
.../mmcls/models/backbones/tnt.py | 367 +++++
.../mmcls/models/backbones/vgg.py | 183 +++
.../models/backbones/vision_transformer.py | 368 +++++
.../mmcls/models/builder.py | 38 +
.../mmcls/models/classifiers/__init__.py | 5 +
.../mmcls/models/classifiers/base.py | 215 +++
.../mmcls/models/classifiers/image.py | 141 ++
.../mmcls/models/heads/__init__.py | 12 +
.../mmcls/models/heads/base_head.py | 15 +
.../mmcls/models/heads/cls_head.py | 78 ++
.../mmcls/models/heads/linear_head.py | 54 +
.../mmcls/models/heads/multi_label_head.py | 64 +
.../models/heads/multi_label_linear_head.py | 59 +
.../mmcls/models/heads/stacked_head.py | 137 ++
.../models/heads/vision_transformer_head.py | 87 ++
.../mmcls/models/losses/__init__.py | 17 +
.../mmcls/models/losses/accuracy.py | 130 ++
.../mmcls/models/losses/asymmetric_loss.py | 112 ++
.../mmcls/models/losses/cross_entropy_loss.py | 189 +++
.../mmcls/models/losses/focal_loss.py | 114 ++
.../mmcls/models/losses/label_smooth_loss.py | 167 +++
.../mmcls/models/losses/seesaw_loss.py | 173 +++
.../mmcls/models/losses/utils.py | 121 ++
.../mmcls/models/necks/__init__.py | 4 +
.../mmcls/models/necks/gap.py | 45 +
.../mmcls/models/utils/__init__.py | 16 +
.../mmcls/models/utils/attention.py | 370 +++++
.../mmcls/models/utils/augment/__init__.py | 7 +
.../mmcls/models/utils/augment/augments.py | 73 +
.../mmcls/models/utils/augment/builder.py | 8 +
.../mmcls/models/utils/augment/cutmix.py | 140 ++
.../mmcls/models/utils/augment/identity.py | 30 +
.../mmcls/models/utils/augment/mixup.py | 57 +
.../mmcls/models/utils/channel_shuffle.py | 29 +
.../mmcls/models/utils/embed.py | 253 ++++
.../mmcls/models/utils/helpers.py | 42 +
.../mmcls/models/utils/inverted_residual.py | 114 ++
.../mmcls/models/utils/make_divisible.py | 25 +
.../mmcls/models/utils/se_layer.py | 74 +
.../mmcls/utils/__init__.py | 5 +
.../mmcls/utils/collect_env.py | 17 +
.../mmcls/utils/logger.py | 8 +
.../Resnet50_for_PyTorch/mmcls/version.py | 28 +
.../Resnet50_for_PyTorch/model-index.yml | 15 +
.../Resnet50_for_PyTorch/requirements.txt | 3 +
.../requirements/docs.txt | 7 +
.../requirements/mminstall.txt | 1 +
.../requirements/optional.txt | 2 +
.../requirements/readthedocs.txt | 3 +
.../requirements/runtime.txt | 3 +
.../requirements/tests.txt | 8 +
.../Resnet50_for_PyTorch/setup.cfg | 24 +
.../Resnet50_for_PyTorch/setup.py | 174 +++
.../Resnet50_for_PyTorch/test/env_npu.sh | 55 +
.../Resnet50_for_PyTorch/test/set_conda.sh | 2 +
.../test/train_ID3915_performance_8p.sh | 141 ++
.../test/train_ID3918_performance_8p.sh | 142 ++
.../tests/data/dataset/ann.txt | 3 +
.../tests/data/dataset/b/2.jpeg | 0
.../tests/data/retinanet.py | 82 ++
.../test_data/test_datasets/test_common.py | 295 ++++
.../test_datasets/test_dataset_utils.py | 22 +
.../test_datasets/test_dataset_wrapper.py | 84 ++
.../test_pipelines/test_auto_augment.py | 1241 +++++++++++++++++
.../test_data/test_pipelines/test_loading.py | 59 +
.../test_pipelines/test_transform.py | 1188 ++++++++++++++++
.../test_downstream/test_mmdet_inference.py | 96 ++
.../tests/test_metrics/test_losses.py | 303 ++++
.../tests/test_metrics/test_metrics.py | 57 +
.../test_backbones/test_mobilenet_v2.py | 259 ++++
.../test_backbones/test_mobilenet_v3.py | 175 +++
.../test_models/test_backbones/test_regnet.py | 94 ++
.../test_models/test_backbones/test_repvgg.py | 293 ++++
.../test_backbones/test_res2net.py | 71 +
.../test_backbones/test_resnest.py | 44 +
.../test_models/test_backbones/test_resnet.py | 566 ++++++++
.../test_backbones/test_resnet_cifar.py | 67 +
.../test_backbones/test_resnext.py | 61 +
.../test_backbones/test_seresnet.py | 247 ++++
.../test_backbones/test_seresnext.py | 74 +
.../test_backbones/test_shufflenet_v1.py | 246 ++++
.../test_backbones/test_shufflenet_v2.py | 205 +++
.../test_backbones/test_swin_transformer.py | 168 +++
.../test_backbones/test_t2t_vit.py | 84 ++
.../test_backbones/test_timm_backbone.py | 43 +
.../test_models/test_backbones/test_tnt.py | 50 +
.../test_models/test_backbones/test_vgg.py | 139 ++
.../test_backbones/test_vision_transformer.py | 162 +++
.../tests/test_models/test_classifiers.py | 296 ++++
.../tests/test_models/test_heads.py | 152 ++
.../tests/test_models/test_neck.py | 39 +
.../test_models/test_utils/test_attention.py | 178 +++
.../test_models/test_utils/test_augment.py | 52 +
.../test_models/test_utils/test_embed.py | 83 ++
.../test_utils/test_inverted_residual.py | 82 ++
.../tests/test_models/test_utils/test_misc.py | 60 +
.../tests/test_models/test_utils/test_se.py | 94 ++
.../tests/test_runtime/test_eval_hook.py | 219 +++
.../tests/test_utils/test_version_utils.py | 21 +
.../tests/test_utils/test_visualization.py | 106 ++
.../tools/analysis_tools/analyze_logs.py | 183 +++
.../tools/analysis_tools/analyze_results.py | 126 ++
.../tools/analysis_tools/eval_metric.py | 75 +
.../tools/analysis_tools/get_flops.py | 55 +
.../convert_models/mobilenetv2_to_mmcls.py | 135 ++
.../tools/convert_models/publish_model.py | 55 +
.../convert_models/reparameterize_repvgg.py | 46 +
.../tools/convert_models/repvgg_to_mmcls.py | 59 +
.../convert_models/shufflenetv2_to_mmcls.py | 113 ++
.../tools/convert_models/vgg_to_mmcls.py | 117 ++
.../tools/deployment/mmcls2torchserve.py | 111 ++
.../tools/deployment/mmcls_handler.py | 51 +
.../tools/deployment/onnx2tensorrt.py | 142 ++
.../tools/deployment/pytorch2onnx.py | 233 ++++
.../tools/deployment/pytorch2torchscript.py | 139 ++
.../tools/deployment/test.py | 116 ++
.../tools/deployment/test_torchserver.py | 44 +
.../Resnet50_for_PyTorch/tools/dist_test.sh | 10 +
.../Resnet50_for_PyTorch/tools/dist_train.sh | 9 +
.../tools/misc/print_config.py | 55 +
.../tools/misc/verify_dataset.py | 131 ++
.../Resnet50_for_PyTorch/tools/slurm_test.sh | 24 +
.../Resnet50_for_PyTorch/tools/slurm_train.sh | 24 +
.../Resnet50_for_PyTorch/tools/test.py | 219 +++
.../Resnet50_for_PyTorch/tools/train.py | 185 +++
.../tools/visualizations/vis_pipeline.py | 257 ++++
490 files changed, 39275 insertions(+)
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.gitignore
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.pre-commit-config.yaml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.readthedocs.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/CITATION.cff
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/LICENSE
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/MANIFEST.in
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README_zh-CN.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar100_bs16.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar10_bs16.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet21k_bs128.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32_pil_resize.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_autoaug.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_384.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_t2t_224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/auto_aug.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/rand_aug.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/voc_bs16.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/default_runtime.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v2_1x.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_large_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_1.6gf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_3.2gf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_4.0gf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_400mf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_6.4gf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_8.0gf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_800mf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-A0_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net101-w26-s4.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w14-s8.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s4.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s6.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s8.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w48-s2.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest101.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest200.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest269.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest50.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101_cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152_cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18_cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34_cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_cutmix.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_mixup.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cutmix.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_label_smooth.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_mixup.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d101.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d152.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d50.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x4d.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x8d.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext152_32x4d.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext50_32x4d.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet101.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet50.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext101_32x4d.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext50_32x4d.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v1_1x.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v2_1x.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_384.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_384.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/small_224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/tiny_224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-14.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-19.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-24.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/tnt_s_patch16_224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11bn.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13bn.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16bn.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19bn.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p16.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p32.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p16.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p32.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/cifar10_bs128.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_coslr.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_AdamW.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_coslr.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_140e.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_coslr.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_epochstep.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs4096_AdamW.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/lenet5_mnist.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_1.6gf_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_12gf_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_3.2gf_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_4.0gf_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_400mf_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_6.4gf_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_8.0gf_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_800mf_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net101-w26-s4_8xb32_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w14-s8_8xb32_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w26-s8_8xb32_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest101_b64x32_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest200_b32x64_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest269_b32x64_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest50_b64x32_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b16x8_cifar10.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b16x8_cifar10.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b16x8_cifar10.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b16x8_cifar10.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar100.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10_mixup.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_coslr_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_cutmix_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_mixup_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d101_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d152_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d50_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x4d_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x8d_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext152_32x4d_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext50_32x4d_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet101_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet50_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_384_evalonly_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_224_evalonly_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_384_evalonly_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11bn_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13bn_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b16x8_voc.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16bn_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19bn_b32x8_imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/README.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/metafile.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/demo/demo.JPEG
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/demo/image_demo.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docker/Dockerfile
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docker/serve/Dockerfile
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docker/serve/config.properties
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docker/serve/entrypoint.sh
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/Makefile
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/css/readthedocs.css
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/concat.JPEG
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/original.JPEG
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/pipeline.JPEG
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/js/custom.js
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/changelog.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/community/CONTRIBUTING.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/conf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/getting_started.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/install.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/model_zoo.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/stat.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/model_serving.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/onnx2tensorrt.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2onnx.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2torchscript.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/visualization.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/config.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/data_pipeline.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/finetune.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_dataset.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_modules.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/Makefile
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/css/readthedocs.css
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/concat.JPEG
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/original.JPEG
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/pipeline.JPEG
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/community/CONTRIBUTING.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/conf.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/getting_started.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/install.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/model_zoo.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/stat.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/model_serving.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/onnx2tensorrt.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2onnx.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2torchscript.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/visualization.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/config.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/data_pipeline.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/finetune.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_dataset.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_modules.md
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/inference.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/test.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/train.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_hooks.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_metrics.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/mean_ap.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/multilabel_eval_metrics.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/test.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/decorators.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/hooks.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/utils.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/dist_utils.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/misc.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/image.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/base_dataset.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/builder.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/dataset_wrappers.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet21k.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/mnist.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/multi_label.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/auto_augment.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/compose.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formating.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formatting.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/loading.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/transforms.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/distributed_sampler.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/utils.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/voc.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/alexnet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/base_backbone.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/lenet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v2.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v3.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/regnet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/repvgg.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/res2net.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnest.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet_cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnext.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnext.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v1.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v2.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/swin_transformer.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/t2t_vit.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/timm_backbone.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/tnt.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vgg.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vision_transformer.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/builder.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/base.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/image.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/base_head.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/cls_head.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/linear_head.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_head.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_linear_head.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/stacked_head.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/vision_transformer_head.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/accuracy.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/asymmetric_loss.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/cross_entropy_loss.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/focal_loss.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/label_smooth_loss.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/seesaw_loss.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/utils.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/gap.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/attention.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/augments.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/builder.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/cutmix.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/identity.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/mixup.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/channel_shuffle.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/embed.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/helpers.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/inverted_residual.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/make_divisible.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/se_layer.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/__init__.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/collect_env.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/logger.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/version.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/model-index.yml
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements.txt
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/docs.txt
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/mminstall.txt
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/optional.txt
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/readthedocs.txt
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/runtime.txt
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/tests.txt
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.cfg
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/env_npu.sh
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/set_conda.sh
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3915_performance_8p.sh
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3918_performance_8p.sh
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/dataset/ann.txt
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/dataset/b/2.jpeg
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/retinanet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_common.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_utils.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_wrapper.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_auto_augment.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_loading.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_transform.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_downstream/test_mmdet_inference.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_losses.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_metrics.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v2.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v3.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_regnet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_repvgg.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_res2net.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnest.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet_cifar.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnext.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnet.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnext.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v1.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v2.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_swin_transformer.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_t2t_vit.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_timm_backbone.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_tnt.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vgg.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vision_transformer.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_classifiers.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_heads.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_neck.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_attention.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_augment.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_embed.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_inverted_residual.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_misc.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_se.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_runtime/test_eval_hook.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_version_utils.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_visualization.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_logs.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_results.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/eval_metric.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/get_flops.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/mobilenetv2_to_mmcls.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/publish_model.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/reparameterize_repvgg.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/repvgg_to_mmcls.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/shufflenetv2_to_mmcls.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/vgg_to_mmcls.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls2torchserve.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls_handler.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/onnx2tensorrt.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2onnx.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2torchscript.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test_torchserver.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_test.sh
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_train.sh
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/print_config.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/verify_dataset.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_test.sh
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_train.sh
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/test.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/train.py
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/visualizations/vis_pipeline.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.gitignore b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.gitignore
new file mode 100644
index 0000000000..786a839695
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.gitignore
@@ -0,0 +1,118 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+**/*.pyc
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+# custom
+/data
+.vscode
+.idea
+*.pkl
+*.pkl.json
+*.log.json
+/work_dirs
+/mmcls/.mim
+
+# Pytorch
+*.pth
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.pre-commit-config.yaml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.pre-commit-config.yaml
new file mode 100644
index 0000000000..19e9f8d481
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.pre-commit-config.yaml
@@ -0,0 +1,54 @@
+exclude: ^tests/data/
+repos:
+ - repo: https://gitlab.com/pycqa/flake8.git
+ rev: 3.8.3
+ hooks:
+ - id: flake8
+ - repo: https://github.com/asottile/seed-isort-config
+ rev: v2.2.0
+ hooks:
+ - id: seed-isort-config
+ - repo: https://github.com/timothycrosley/isort
+ rev: 4.3.21
+ hooks:
+ - id: isort
+ - repo: https://github.com/pre-commit/mirrors-yapf
+ rev: v0.30.0
+ hooks:
+ - id: yapf
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.1.0
+ hooks:
+ - id: trailing-whitespace
+ - id: check-yaml
+ - id: end-of-file-fixer
+ - id: requirements-txt-fixer
+ - id: double-quote-string-fixer
+ - id: check-merge-conflict
+ - id: fix-encoding-pragma
+ args: ["--remove"]
+ - id: mixed-line-ending
+ args: ["--fix=lf"]
+ - repo: https://github.com/jumanjihouse/pre-commit-hooks
+ rev: 2.1.4
+ hooks:
+ - id: markdownlint
+ args: ["-r", "~MD002,~MD013,~MD029,~MD033,~MD034",
+ "-t", "allow_different_nesting"]
+ - repo: https://github.com/codespell-project/codespell
+ rev: v2.1.0
+ hooks:
+ - id: codespell
+ - repo: https://github.com/myint/docformatter
+ rev: v1.3.1
+ hooks:
+ - id: docformatter
+ args: ["--in-place", "--wrap-descriptions", "79"]
+ # - repo: local
+ # hooks:
+ # - id: clang-format
+ # name: clang-format
+ # description: Format files with ClangFormat
+ # entry: clang-format -style=google -i
+ # language: system
+ # files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.readthedocs.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.readthedocs.yml
new file mode 100644
index 0000000000..6cfbf5d310
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.readthedocs.yml
@@ -0,0 +1,9 @@
+version: 2
+
+formats: all
+
+python:
+ version: 3.7
+ install:
+ - requirements: requirements/docs.txt
+ - requirements: requirements/readthedocs.txt
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/CITATION.cff b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/CITATION.cff
new file mode 100644
index 0000000000..0c0d773021
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/CITATION.cff
@@ -0,0 +1,9 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+title: "OpenMMLab's Image Classification Toolbox and Benchmark"
+authors:
+ - name: "MMClassification Contributors"
+version: 0.15.0
+date-released: 2020-07-09
+repository-code: "https://github.com/open-mmlab/mmclassification"
+license: Apache-2.0
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/LICENSE b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/LICENSE
new file mode 100644
index 0000000000..f731325b2c
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/LICENSE
@@ -0,0 +1,203 @@
+Copyright (c) OpenMMLab. All rights reserved
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2020 MMClassification Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/MANIFEST.in b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/MANIFEST.in
new file mode 100644
index 0000000000..c4ce6d7f6c
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/MANIFEST.in
@@ -0,0 +1,3 @@
+include mmcls/.mim/model-index.yml
+recursive-include mmcls/.mim/configs *.py *.yml
+recursive-include mmcls/.mim/tools *.py *.sh
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README.md
new file mode 100644
index 0000000000..12209eefc8
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README.md
@@ -0,0 +1,104 @@
+
+

+
+
+[](https://github.com/open-mmlab/mmclassification/actions)
+[](https://mmclassification.readthedocs.io/en/latest/?badge=latest)
+[](https://codecov.io/gh/open-mmlab/mmclassification)
+[](https://github.com/open-mmlab/mmclassification/blob/master/LICENSE)
+
+## Introduction
+
+English | [简体中文](/README_zh-CN.md)
+
+MMClassification is an open-source image classification toolbox based on PyTorch. It is
+a part of the [OpenMMLab](https://openmmlab.com/) project.
+
+Documentation: https://mmclassification.readthedocs.io/en/latest/
+
+
+
+### Major features
+
+- Various backbones and pretrained models
+- Bag of training tricks
+- Large-scale training configs
+- High efficiency and extensibility
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+## Changelog
+
+v0.17.0 was released on 29/10/2021.
+
+Highlights of the new version:
+- Support for the **Tokens-to-Token ViT** and **Res2Net** backbones. Give them a try!
+- Support for the **ImageNet21k** dataset.
+- A new **pipeline visualization** tool. Try it following the [tutorial](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#pipeline-visualization)!
+
+Please refer to [changelog.md](docs/changelog.md) for more details and other release history.
+
+## Benchmark and model zoo
+
+Results and models are available in the [model zoo](docs/model_zoo.md).
+
+Supported backbones:
+
+- [x] ResNet
+- [x] ResNeXt
+- [x] SE-ResNet
+- [x] SE-ResNeXt
+- [x] RegNet
+- [x] ShuffleNetV1
+- [x] ShuffleNetV2
+- [x] MobileNetV2
+- [x] MobileNetV3
+- [x] Swin-Transformer
+
+## Installation
+
+Please refer to [install.md](docs/install.md) for installation and dataset preparation.
+
+## Getting Started
+
+Please see [getting_started.md](docs/getting_started.md) for the basic usage of MMClassification. There are also tutorials for [finetuning models](docs/tutorials/finetune.md), [adding new datasets](docs/tutorials/new_dataset.md), [designing data pipelines](docs/tutorials/data_pipeline.md), and [adding new modules](docs/tutorials/new_modules.md).
+
+## Citation
+
+If you find this project useful in your research, please consider citing:
+
+```BibTeX
+@misc{2020mmclassification,
+ title={OpenMMLab's Image Classification Toolbox and Benchmark},
+ author={MMClassification Contributors},
+ howpublished = {\url{https://github.com/open-mmlab/mmclassification}},
+ year={2020}
+}
+```
+
+## Contributing
+
+We appreciate all contributions to improve MMClassification.
+Please refer to [CONTRIBUTING.md](docs/community/CONTRIBUTING.md) for the contributing guideline.
+
+## Acknowledgement
+
+MMClassification is an open-source project contributed to by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as the users who provide valuable feedback.
+We hope the toolbox and benchmark serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop new classifiers.
+
+## Projects in OpenMMLab
+
+- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
+- [MIM](https://github.com/open-mmlab/mim): MIM Installs OpenMMLab Packages.
+- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
+- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
+- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
+- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
+- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
+- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
+- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
+- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
+- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab toolbox for text detection, recognition and understanding.
+- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab toolkit for generative models.
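
As a quick orientation aid: the high-level inference API added by this patch in `mmcls/apis/inference.py` can classify a single image in a few lines. The sketch below is illustrative only; the config and checkpoint paths are placeholders, not files guaranteed by this patch.

```python
# Hedged sketch of single-image inference with the mmcls high-level API.
from mmcls.apis import inference_model, init_model

config_file = 'configs/resnet/resnet50_b32x8_imagenet.py'  # placeholder path
checkpoint_file = 'checkpoints/resnet50.pth'               # placeholder path

# Build the model from the config and load the weights; CPU keeps the demo portable.
model = init_model(config_file, checkpoint_file, device='cpu')

# demo/demo.JPEG is shipped in this patch. The result dict carries the
# predicted class name, label index, and score.
result = inference_model(model, 'demo/demo.JPEG')
print(result['pred_class'], result['pred_score'])
```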
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README_zh-CN.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README_zh-CN.md
new file mode 100644
index 0000000000..206b9771b8
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README_zh-CN.md
@@ -0,0 +1,108 @@
+
+

+
+
+[English](/README.md) | 简体中文
+
+[](https://github.com/open-mmlab/mmclassification/actions)
+[](https://mmclassification.readthedocs.io/en/latest/?badge=latest)
+[](https://codecov.io/gh/open-mmlab/mmclassification)
+[](https://github.com/open-mmlab/mmclassification/blob/master/LICENSE)
+
+## Introduction
+
+MMClassification is an open-source image classification toolbox based on PyTorch, and a part of the [OpenMMLab](https://openmmlab.com/) project.
+
+Documentation: https://mmclassification.readthedocs.io/en/latest/
+
+
+
+### Major features
+
+- Various backbones and pretrained models
+- Bag of training tricks
+- Large-scale training configs
+- High efficiency and extensibility
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+## Changelog
+
+v0.17.0 was released on 29/10/2021.
+
+Highlights of the new version:
+- Support for the **Tokens-to-Token ViT** and **Res2Net** backbones. Give them a try!
+- Support for the **ImageNet21k** dataset.
+- A new **pipeline visualization** tool. Try it following the [tutorial](https://mmclassification.readthedocs.io/zh_CN/latest/tools/visualization.html#id2)!
+
+Please refer to [changelog.md](docs/changelog.md) for more details and other release history.
+
+## Benchmark and model zoo
+
+Results and models are available in the [model zoo](docs/model_zoo.md).
+
+Supported backbones:
+
+- [x] ResNet
+- [x] ResNeXt
+- [x] SE-ResNet
+- [x] SE-ResNeXt
+- [x] RegNet
+- [x] ShuffleNetV1
+- [x] ShuffleNetV2
+- [x] MobileNetV2
+- [x] MobileNetV3
+- [x] Swin-Transformer
+
+## Installation
+
+Please refer to [install.md](docs_zh-CN/install.md) for installation.
+
+## Getting Started
+
+Please see [getting_started.md](docs_zh-CN/getting_started.md) for the basic usage of MMClassification. It also contains guides on [finetuning models](docs_zh-CN/tutorials/finetune.md), [adding new datasets](docs_zh-CN/tutorials/new_dataset.md), [designing data pipelines](docs_zh-CN/tutorials/data_pipeline.md), and [adding new modules](docs_zh-CN/tutorials/new_modules.md).
+
+## Contributing
+
+We welcome any contributions that help improve MMClassification. Please refer to [CONTRIBUTING.md](docs_zh-CN/community/CONTRIBUTING.md) to learn how to participate.
+
+## Acknowledgement
+
+MMClassification is an open-source project jointly contributed by different schools and companies. We thank all the contributors who provide algorithm reimplementations and new features, as well as the users who offer valuable feedback.
+
+We hope the toolbox and benchmark provide the community with flexible code tools for reproducing existing algorithms and developing new models, and thereby keep contributing to the open-source community.
+
+## Other projects in OpenMMLab
+
+- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
+- [MIM](https://github.com/open-mmlab/mim): MIM is the unified entry point for OpenMMLab projects, algorithms, and models.
+- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
+- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
+- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
+- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation video understanding toolbox and benchmark.
+- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab unified video perception platform.
+- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
+- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
+- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab toolbox for end-to-end text detection, recognition, and understanding.
+- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab toolkit for generative models.
+
+## Welcome to the OpenMMLab community
+
+Scan the QR codes below to follow the OpenMMLab team's [official Zhihu account](https://www.zhihu.com/people/openmmlab) and join the OpenMMLab team's [official QQ group](https://jq.qq.com/?_wv=1027&k=GJP18SjI)
+
+
+
+![QR codes]()
+
+
+In the OpenMMLab community, we will
+
+- 📢 share the latest core technologies of AI frameworks
+- 💻 explain the source code of commonly used PyTorch modules
+- 📰 publish OpenMMLab news
+- 🚀 introduce cutting-edge algorithms developed by OpenMMLab
+- 🏃 offer more efficient answers to questions and channels for feedback
+- 🔥 provide a platform for full exchanges with developers from all industries
+
+Packed with useful content 📘 and waiting for you 💗 — the OpenMMLab community looks forward to your joining 👬
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar100_bs16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar100_bs16.py
new file mode 100644
index 0000000000..d4f8db75f8
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar100_bs16.py
@@ -0,0 +1,36 @@
+# dataset settings
+dataset_type = 'CIFAR100'
+img_norm_cfg = dict(
+ mean=[129.304, 124.070, 112.434],
+ std=[68.170, 65.392, 70.418],
+ to_rgb=False)
+train_pipeline = [
+ dict(type='RandomCrop', size=32, padding=4),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=16,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/cifar100',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/cifar100',
+ pipeline=test_pipeline,
+ test_mode=True),
+ test=dict(
+ type=dataset_type,
+ data_prefix='data/cifar100',
+ pipeline=test_pipeline,
+ test_mode=True))
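
Files under `configs/_base_/` such as the one above are not meant to be run directly; a top-level config composes them through mmcv's `_base_` inheritance. A minimal sketch of such a child config follows; the particular combination of base files is illustrative, though each file referenced exists in this patch.

```python
# Hypothetical child config that reuses the dataset settings above.
# Relative paths in `_base_` resolve from the location of this file.
_base_ = [
    '../_base_/models/resnet50_cifar.py',   # model definition from this patch
    '../_base_/datasets/cifar100_bs16.py',  # the dataset file shown above
    '../_base_/default_runtime.py',         # logging/checkpoint defaults
]

# Keys re-declared in the child override inherited values, e.g. raising the
# per-GPU batch size from the base file's 16 to 128:
data = dict(samples_per_gpu=128)
```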
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar10_bs16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar10_bs16.py
new file mode 100644
index 0000000000..0d28adf5bf
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar10_bs16.py
@@ -0,0 +1,35 @@
+# dataset settings
+dataset_type = 'CIFAR10'
+img_norm_cfg = dict(
+ mean=[125.307, 122.961, 113.8575],
+ std=[51.5865, 50.847, 51.255],
+ to_rgb=False)
+train_pipeline = [
+ dict(type='RandomCrop', size=32, padding=4),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=16,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type, data_prefix='data/cifar10',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/cifar10',
+ pipeline=test_pipeline,
+ test_mode=True),
+ test=dict(
+ type=dataset_type,
+ data_prefix='data/cifar10',
+ pipeline=test_pipeline,
+ test_mode=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet21k_bs128.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet21k_bs128.py
new file mode 100644
index 0000000000..b81a7466f4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet21k_bs128.py
@@ -0,0 +1,43 @@
+# dataset settings
+dataset_type = 'ImageNet21k'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet21k/train',
+ pipeline=train_pipeline,
+ recursion_subdir=True),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet21k/val',
+ ann_file='data/imagenet21k/meta/val.txt',
+ pipeline=test_pipeline,
+ recursion_subdir=True),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet21k/val',
+ ann_file='data/imagenet21k/meta/val.txt',
+ pipeline=test_pipeline,
+ recursion_subdir=True))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32.py
new file mode 100644
index 0000000000..8a5465902a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32.py
@@ -0,0 +1,40 @@
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
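
For context, a hedged sketch of how a dataset config like the one above is consumed at runtime: mmcv parses the file into a `Config` object and mmcls builds the dataset from its `data.train` sub-dict. This assumes ImageNet is laid out under `data/imagenet/` exactly as the `data_prefix` and `ann_file` fields expect.

```python
# Sketch only: requires ImageNet on disk at the paths the config assumes.
from mmcv import Config
from mmcls.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/imagenet_bs32.py')
train_set = build_dataset(cfg.data.train)  # train_pipeline runs per sample
print(len(train_set))  # number of training images found
```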
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32_pil_resize.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32_pil_resize.py
new file mode 100644
index 0000000000..22b74f76b1
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32_pil_resize.py
@@ -0,0 +1,40 @@
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64.py
new file mode 100644
index 0000000000..b9f866a404
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64.py
@@ -0,0 +1,40 @@
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_autoaug.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_autoaug.py
new file mode 100644
index 0000000000..a1092a3124
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_autoaug.py
@@ -0,0 +1,43 @@
+_base_ = ['./pipelines/auto_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='AutoAugment', policies={{_base_.policy_imagenet}}),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for the standard test set
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
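
The double-braced `{{_base_.policy_imagenet}}` is mmcv's cross-file reference: before the config is parsed, the placeholder is replaced by the `policy_imagenet` list defined in `pipelines/auto_aug.py` (added later in this patch). A sketch of inspecting the resolved result, assuming mmcv is installed and the path is adjusted to this repo's layout:

# Inspect the AutoAugment step after mmcv resolves the placeholder.
from mmcv import Config

cfg = Config.fromfile('configs/_base_/datasets/imagenet_bs64_autoaug.py')
aug = cfg.train_pipeline[3]  # Load, RandomResizedCrop, RandomFlip, AutoAugment
print(aug['type'])           # AutoAugment
print(len(aug['policies']))  # 25 sub-policies from pipelines/auto_aug.py
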
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize.py
new file mode 100644
index 0000000000..95d0e1f25a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize.py
@@ -0,0 +1,40 @@
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for the standard test set
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py
new file mode 100644
index 0000000000..f9c50267af
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py
@@ -0,0 +1,45 @@
+_base_ = [
+ 'pipelines/auto_aug.py',
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='AutoAugment', policies={{_base_.policy_imagenet}}),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for the standard test set
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_224.py
new file mode 100644
index 0000000000..4a059a3313
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_224.py
@@ -0,0 +1,71 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(256, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for the standard test set
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=10, metric='accuracy')
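
The `[::-1]` reversals above account for channel order: `img_norm_cfg` lists mean/std in RGB, but images are still BGR when `RandAugment` pads and `RandomErasing` fills, since both run before `Normalize` with `to_rgb=True`. A worked sketch of the pad value:

# Pad/fill values are the RGB means flipped back to BGR order.
img_mean_rgb = [123.675, 116.28, 103.53]
pad_val = [round(x) for x in img_mean_rgb[::-1]]
print(pad_val)  # [104, 116, 124]
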
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_384.py
new file mode 100644
index 0000000000..d263939929
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_384.py
@@ -0,0 +1,43 @@
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=384,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=384, backend='pillow', interpolation='bicubic'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for the standard test set
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=10, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_t2t_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_t2t_224.py
new file mode 100644
index 0000000000..375775debd
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_t2t_224.py
@@ -0,0 +1,71 @@
+_base_ = ['./pipelines/rand_aug.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(
+ type='RandAugment',
+ policies={{_base_.rand_increasing_policies}},
+ num_policies=2,
+ total_level=10,
+ magnitude_level=9,
+ magnitude_std=0.5,
+ hparams=dict(
+ pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
+ interpolation='bicubic')),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.25,
+ mode='rand',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean'][::-1],
+ fill_std=img_norm_cfg['std'][::-1]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='Resize',
+ size=(248, -1),
+ backend='pillow',
+ interpolation='bicubic'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for the standard test set
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+
+evaluation = dict(interval=10, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/auto_aug.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/auto_aug.py
new file mode 100644
index 0000000000..5a10f7eec6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/auto_aug.py
@@ -0,0 +1,96 @@
+# AutoAugment policy for ImageNet; see
+# https://github.com/DeepVoltaire/AutoAugment/blame/master/autoaugment.py
+policy_imagenet = [
+ [
+ dict(type='Posterize', bits=4, prob=0.4),
+ dict(type='Rotate', angle=30., prob=0.6)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+ [
+ dict(type='Posterize', bits=5, prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 6, prob=0.6),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Posterize', bits=6, prob=0.8),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='Rotate', angle=10., prob=0.2),
+ dict(type='Solarize', thr=256 / 9, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0., prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.0),
+ dict(type='Equalize', prob=0.8)],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0.2, prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0.8, prob=0.8),
+ dict(type='Solarize', thr=256 / 9 * 2, prob=0.8)
+ ],
+ [
+ dict(type='Sharpness', magnitude=0.7, prob=0.4),
+ dict(type='Invert', prob=0.6)
+ ],
+ [
+ dict(
+ type='Shear',
+ magnitude=0.3 / 9 * 5,
+ prob=0.6,
+ direction='horizontal'),
+ dict(type='Equalize', prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+]
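
Each inner list above is one sub-policy: two transforms applied in order, each firing independently with its own `prob`. At run time, AutoAugment draws one sub-policy uniformly per image. A minimal sketch of that selection logic; `apply_fn` is a hypothetical helper, not mmcls code:

import random

def apply_auto_augment(img, policies, apply_fn):
    """Pick one sub-policy at random, then run its transforms in order."""
    sub_policy = random.choice(policies)
    for t in sub_policy:
        if random.random() < t.get('prob', 1.0):
            img = apply_fn(img, t)  # execute a single transform dict
    return img
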
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/rand_aug.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/rand_aug.py
new file mode 100644
index 0000000000..f2bab3c364
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/rand_aug.py
@@ -0,0 +1,43 @@
+# Refers to `_RAND_INCREASING_TRANSFORMS` in pytorch-image-models
+rand_increasing_policies = [
+ dict(type='AutoContrast'),
+ dict(type='Equalize'),
+ dict(type='Invert'),
+ dict(type='Rotate', magnitude_key='angle', magnitude_range=(0, 30)),
+ dict(type='Posterize', magnitude_key='bits', magnitude_range=(4, 0)),
+ dict(type='Solarize', magnitude_key='thr', magnitude_range=(256, 0)),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110)),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.9)),
+ dict(type='Contrast', magnitude_key='magnitude', magnitude_range=(0, 0.9)),
+ dict(
+ type='Brightness', magnitude_key='magnitude',
+ magnitude_range=(0, 0.9)),
+ dict(
+ type='Sharpness', magnitude_key='magnitude', magnitude_range=(0, 0.9)),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ direction='horizontal'),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ direction='vertical'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.45),
+ direction='horizontal'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.45),
+ direction='vertical')
+]
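
RandAugment samples `num_policies` of these transforms per image and maps the shared `magnitude_level` onto each transform's own `magnitude_range`, jittered by `magnitude_std`. A rough sketch of the scaling with the values used in `imagenet_bs64_swin_224.py`; the exact sampling details are mmcls internals, so treat this as approximate:

import random

def sample_magnitude(magnitude_range, magnitude_level=9, total_level=10,
                     magnitude_std=0.5):
    """Map the global level onto one transform's range, with jitter."""
    level = min(max(random.gauss(magnitude_level, magnitude_std), 0),
                total_level)
    low, high = magnitude_range
    return low + (high - low) * level / total_level

print(sample_magnitude((0, 30)))  # Rotate: roughly 27 degrees at level 9
print(sample_magnitude((4, 0)))   # Posterize: fewer bits at higher levels
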
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/voc_bs16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/voc_bs16.py
new file mode 100644
index 0000000000..73fa0bcc8b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/voc_bs16.py
@@ -0,0 +1,41 @@
+# dataset settings
+dataset_type = 'VOC'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=16,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/VOCdevkit/VOC2007/',
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/VOCdevkit/VOC2007/',
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_prefix='data/VOCdevkit/VOC2007/',
+ ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
+ pipeline=test_pipeline))
+evaluation = dict(
+ interval=1, metric=['mAP', 'CP', 'OP', 'CR', 'OR', 'CF1', 'OF1'])
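
For multi-label VOC, `CP/CR/CF1` are class-averaged (macro) precision/recall/F1 while `OP/OR/OF1` pool all predictions first (micro). A small sketch of the distinction, assuming predictions have already been thresholded to binary:

import numpy as np

def multilabel_metrics(pred, target):
    """pred, target: (num_samples, num_classes) binary 0/1 arrays."""
    eps = 1e-8
    tp = (pred & target).sum(axis=0)
    cp = (tp / (pred.sum(axis=0) + eps)).mean()    # class-averaged precision
    cr = (tp / (target.sum(axis=0) + eps)).mean()  # class-averaged recall
    op = tp.sum() / (pred.sum() + eps)             # overall precision
    o_r = tp.sum() / (target.sum() + eps)          # overall recall
    return dict(CP=cp, CR=cr, OP=op, OR=o_r,
                CF1=2 * cp * cr / (cp + cr + eps),
                OF1=2 * op * o_r / (op + o_r + eps))
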
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/default_runtime.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/default_runtime.py
new file mode 100644
index 0000000000..ba965a4547
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/default_runtime.py
@@ -0,0 +1,16 @@
+# checkpoint saving
+checkpoint_config = dict(interval=1)
+# yapf:disable
+log_config = dict(
+ interval=100,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ # dict(type='TensorboardLoggerHook')
+ ])
+# yapf:enable
+
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
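
Downstream configs inherit this runtime file and override only what they need, for example enabling the commented-out TensorBoard hook or resuming from a checkpoint. A hypothetical override, not part of this patch:

# Hypothetical user config layered on top of default_runtime.py.
_base_ = ['../_base_/default_runtime.py']

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),  # now enabled
    ])
resume_from = 'work_dirs/resnet50/latest.pth'  # illustrative path
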
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v2_1x.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v2_1x.py
new file mode 100644
index 0000000000..6ebff1eff9
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v2_1x.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MobileNetV2', widen_factor=1.0),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1280,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
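
Every model file that follows shares this three-stage layout: the backbone emits feature maps at `out_indices`, the neck pools them to a vector, and the head maps that vector (`in_channels`) to `num_classes` logits. A schematic of the forward flow in plain PyTorch, illustrative rather than mmcls's actual classes:

import torch.nn as nn

class SketchClassifier(nn.Module):
    """Backbone -> neck -> head, mirroring the config structure."""

    def __init__(self, backbone, in_channels=1280, num_classes=1000):
        super().__init__()
        self.backbone = backbone                         # e.g. MobileNetV2 trunk
        self.neck = nn.AdaptiveAvgPool2d(1)              # GlobalAveragePooling
        self.head = nn.Linear(in_channels, num_classes)  # LinearClsHead

    def forward(self, img):
        feat = self.backbone(img)          # (N, C, H, W) with C == in_channels
        feat = self.neck(feat).flatten(1)  # (N, C)
        return self.head(feat)             # (N, num_classes) logits
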
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_large_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_large_imagenet.py
new file mode 100644
index 0000000000..b6fdafab6e
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_large_imagenet.py
@@ -0,0 +1,14 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MobileNetV3', arch='large'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='StackedLinearClsHead',
+ num_classes=1000,
+ in_channels=960,
+ mid_channels=[1280],
+ dropout_rate=0.2,
+ act_cfg=dict(type='HSwish'),
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_cifar.py
new file mode 100644
index 0000000000..5dbe980c47
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_cifar.py
@@ -0,0 +1,13 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MobileNetV3', arch='small'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='StackedLinearClsHead',
+ num_classes=10,
+ in_channels=576,
+ mid_channels=[1280],
+ act_cfg=dict(type='HSwish'),
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_imagenet.py
new file mode 100644
index 0000000000..5b8af1f9ac
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_imagenet.py
@@ -0,0 +1,14 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='MobileNetV3', arch='small'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='StackedLinearClsHead',
+ num_classes=1000,
+ in_channels=576,
+ mid_channels=[1024],
+ dropout_rate=0.2,
+ act_cfg=dict(type='HSwish'),
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_1.6gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_1.6gf.py
new file mode 100644
index 0000000000..b81f0ad25b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_1.6gf.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='RegNet', arch='regnetx_1.6gf'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=912,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py
new file mode 100644
index 0000000000..383d4f8799
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='RegNet', arch='regnetx_12gf'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2240,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_3.2gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_3.2gf.py
new file mode 100644
index 0000000000..67d4541395
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_3.2gf.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='RegNet', arch='regnetx_3.2gf'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1008,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_4.0gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_4.0gf.py
new file mode 100644
index 0000000000..01419c64bd
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_4.0gf.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='RegNet', arch='regnetx_4.0gf'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1360,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_400mf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_400mf.py
new file mode 100644
index 0000000000..ef518b9f7d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_400mf.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='RegNet', arch='regnetx_400mf'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=384,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_6.4gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_6.4gf.py
new file mode 100644
index 0000000000..44e6222af0
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_6.4gf.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='RegNet', arch='regnetx_6.4gf'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1624,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_8.0gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_8.0gf.py
new file mode 100644
index 0000000000..29298268d7
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_8.0gf.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='RegNet', arch='regnetx_8.0gf'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1920,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_800mf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_800mf.py
new file mode 100644
index 0000000000..210f760fe2
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_800mf.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='RegNet', arch='regnetx_800mf'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=672,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-A0_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-A0_in1k.py
new file mode 100644
index 0000000000..093ffb7eea
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-A0_in1k.py
@@ -0,0 +1,15 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='RepVGG',
+ arch='A0',
+ out_indices=(3, ),
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1280,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py
new file mode 100644
index 0000000000..5bb07db54d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py
@@ -0,0 +1,23 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='RepVGG',
+ arch='B3',
+ out_indices=(3, ),
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2560,
+ loss=dict(
+ type='LabelSmoothLoss',
+ loss_weight=1.0,
+ label_smooth_val=0.1,
+ mode='classy_vision',
+ num_classes=1000),
+ topk=(1, 5),
+ ),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
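
`BatchMixup` blends each image and its (soft) label with a randomly chosen partner from the same batch, using a weight `lam ~ Beta(alpha, alpha)`. A compact sketch, assuming one-hot (or smoothed) label tensors:

import torch

def batch_mixup(imgs, soft_labels, alpha=0.2):
    """Mix each sample with a randomly permuted partner in the batch."""
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    index = torch.randperm(imgs.size(0))
    mixed_imgs = lam * imgs + (1 - lam) * imgs[index]
    mixed_labels = lam * soft_labels + (1 - lam) * soft_labels[index]
    return mixed_imgs, mixed_labels
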
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net101-w26-s4.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net101-w26-s4.py
new file mode 100644
index 0000000000..3bf64c508f
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net101-w26-s4.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=101,
+ scales=4,
+ base_width=26,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w14-s8.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w14-s8.py
new file mode 100644
index 0000000000..5875142c34
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w14-s8.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=8,
+ base_width=14,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s4.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s4.py
new file mode 100644
index 0000000000..be8fdb5859
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s4.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=4,
+ base_width=26,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s6.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s6.py
new file mode 100644
index 0000000000..281b136a67
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s6.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=6,
+ base_width=26,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s8.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s8.py
new file mode 100644
index 0000000000..b4f62f3ed1
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s8.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=8,
+ base_width=26,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w48-s2.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w48-s2.py
new file mode 100644
index 0000000000..8675c91fa0
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w48-s2.py
@@ -0,0 +1,18 @@
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='Res2Net',
+ depth=50,
+ scales=2,
+ base_width=48,
+ deep_stem=False,
+ avg_down=False,
+ ),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest101.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest101.py
new file mode 100644
index 0000000000..97f7749cc3
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest101.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeSt',
+ depth=101,
+ num_stages=4,
+ stem_channels=128,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ num_classes=1000,
+ reduction='mean',
+ loss_weight=1.0),
+ topk=(1, 5),
+ cal_acc=False))
+train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest200.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest200.py
new file mode 100644
index 0000000000..4610017814
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest200.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeSt',
+ depth=200,
+ num_stages=4,
+ stem_channels=128,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ num_classes=1000,
+ reduction='mean',
+ loss_weight=1.0),
+ topk=(1, 5),
+ cal_acc=False))
+train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest269.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest269.py
new file mode 100644
index 0000000000..ad365d03e1
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest269.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeSt',
+ depth=269,
+ num_stages=4,
+ stem_channels=128,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ num_classes=1000,
+ reduction='mean',
+ loss_weight=1.0),
+ topk=(1, 5),
+ cal_acc=False))
+train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest50.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest50.py
new file mode 100644
index 0000000000..15269d4a82
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest50.py
@@ -0,0 +1,23 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeSt',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ num_classes=1000,
+ reduction='mean',
+ loss_weight=1.0),
+ topk=(1, 5),
+ cal_acc=False))
+train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101.py
new file mode 100644
index 0000000000..1147cd4be9
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=101,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101_cifar.py
new file mode 100644
index 0000000000..a84d470e3a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101_cifar.py
@@ -0,0 +1,16 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet_CIFAR',
+ depth=101,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=10,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152.py
new file mode 100644
index 0000000000..94a718c3ce
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=152,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152_cifar.py
new file mode 100644
index 0000000000..55c0cc6c66
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152_cifar.py
@@ -0,0 +1,16 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet_CIFAR',
+ depth=152,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=10,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18.py
new file mode 100644
index 0000000000..7c66758ee4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=18,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18_cifar.py
new file mode 100644
index 0000000000..7b9cf1e733
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18_cifar.py
@@ -0,0 +1,16 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet_CIFAR',
+ depth=18,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=10,
+ in_channels=512,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34.py
new file mode 100644
index 0000000000..100ee286be
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=34,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=512,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34_cifar.py
new file mode 100644
index 0000000000..55d033bc30
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34_cifar.py
@@ -0,0 +1,16 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet_CIFAR',
+ depth=34,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=10,
+ in_channels=512,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50.py
new file mode 100644
index 0000000000..129a2bb50c
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar.py
new file mode 100644
index 0000000000..33b66d5264
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar.py
@@ -0,0 +1,16 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet_CIFAR',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=10,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_cutmix.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_cutmix.py
new file mode 100644
index 0000000000..73c38be271
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_cutmix.py
@@ -0,0 +1,18 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet_CIFAR',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='MultiLabelLinearClsHead',
+ num_classes=10,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)),
+ train_cfg=dict(
+ augments=dict(type='BatchCutMix', alpha=1.0, num_classes=10,
+ prob=1.0)))
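
`BatchCutMix` pastes a rectangular patch from a partner image instead of blending, and weights the two labels by the patch's area fraction, which is why these configs pair it with the soft-label `MultiLabelLinearClsHead` (`use_soft=True`). A rough sketch:

import math
import torch

def batch_cutmix(imgs, soft_labels, alpha=1.0):
    """Cut a Beta-sized box from a permuted partner into each image."""
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    index = torch.randperm(imgs.size(0))
    _, _, h, w = imgs.shape
    cut = math.sqrt(1. - lam)
    ch, cw = int(h * cut), int(w * cut)
    cy, cx = torch.randint(h, (1,)).item(), torch.randint(w, (1,)).item()
    y1, y2 = max(cy - ch // 2, 0), min(cy + ch // 2, h)
    x1, x2 = max(cx - cw // 2, 0), min(cx + cw // 2, w)
    imgs[:, :, y1:y2, x1:x2] = imgs[index, :, y1:y2, x1:x2]
    lam = 1 - (y2 - y1) * (x2 - x1) / (h * w)  # actual area kept
    return imgs, lam * soft_labels + (1 - lam) * soft_labels[index]
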
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_mixup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_mixup.py
new file mode 100644
index 0000000000..3de14f3f2a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_mixup.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet_CIFAR',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='MultiLabelLinearClsHead',
+ num_classes=10,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=1., num_classes=10, prob=1.)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cutmix.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cutmix.py
new file mode 100644
index 0000000000..fb79088b79
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cutmix.py
@@ -0,0 +1,18 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='MultiLabelLinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)),
+ train_cfg=dict(
+ augments=dict(
+ type='BatchCutMix', alpha=1.0, num_classes=1000, prob=1.0)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_label_smooth.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_label_smooth.py
new file mode 100644
index 0000000000..b6f7937519
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_label_smooth.py
@@ -0,0 +1,18 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_mixup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_mixup.py
new file mode 100644
index 0000000000..8ff9522605
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_mixup.py
@@ -0,0 +1,18 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='MultiLabelLinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d101.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d101.py
new file mode 100644
index 0000000000..1e56223121
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d101.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNetV1d',
+ depth=101,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d152.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d152.py
new file mode 100644
index 0000000000..58cc73beb3
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d152.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNetV1d',
+ depth=152,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d50.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d50.py
new file mode 100644
index 0000000000..015aaa3d81
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d50.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNetV1d',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x4d.py
new file mode 100644
index 0000000000..1c89fb6488
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x4d.py
@@ -0,0 +1,19 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeXt',
+ depth=101,
+ num_stages=4,
+ out_indices=(3, ),
+ groups=32,
+ width_per_group=4,
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x8d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x8d.py
new file mode 100644
index 0000000000..2bb63f3aeb
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x8d.py
@@ -0,0 +1,19 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeXt',
+ depth=101,
+ num_stages=4,
+ out_indices=(3, ),
+ groups=32,
+ width_per_group=8,
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext152_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext152_32x4d.py
new file mode 100644
index 0000000000..d392eff3dc
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext152_32x4d.py
@@ -0,0 +1,19 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeXt',
+ depth=152,
+ num_stages=4,
+ out_indices=(3, ),
+ groups=32,
+ width_per_group=4,
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext50_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext50_32x4d.py
new file mode 100644
index 0000000000..060426231e
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext50_32x4d.py
@@ -0,0 +1,19 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='ResNeXt',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ groups=32,
+ width_per_group=4,
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
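
`groups=32, width_per_group=4` is the classic 32x4d setting: every bottleneck's 3x3 conv is split into 32 groups whose width scales with the stage. A quick sketch of the widely used width rule, illustrative only; the exact stage arithmetic lives in the backbone:

# Grouped-conv width of a ResNeXt bottleneck at each stage.
def resnext_width(planes, groups=32, width_per_group=4, base=64):
    return (planes * width_per_group // base) * groups

for planes in (64, 128, 256, 512):
    print(planes, '->', resnext_width(planes))  # 128, 256, 512, 1024
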
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet101.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet101.py
new file mode 100644
index 0000000000..137a6f90f6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet101.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SEResNet',
+ depth=101,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet50.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet50.py
new file mode 100644
index 0000000000..e5f6bfce8d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet50.py
@@ -0,0 +1,17 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SEResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext101_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext101_32x4d.py
new file mode 100644
index 0000000000..cc8a62c393
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext101_32x4d.py
@@ -0,0 +1,20 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SEResNeXt',
+ depth=101,
+ num_stages=4,
+ out_indices=(3, ),
+ groups=32,
+ width_per_group=4,
+ se_ratio=16,
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext50_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext50_32x4d.py
new file mode 100644
index 0000000000..0cdf7cb696
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext50_32x4d.py
@@ -0,0 +1,20 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SEResNeXt',
+ depth=50,
+ num_stages=4,
+ out_indices=(3, ),
+ groups=32,
+ width_per_group=4,
+ se_ratio=16,
+ style='pytorch'),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v1_1x.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v1_1x.py
new file mode 100644
index 0000000000..f0f9d1fbdd
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v1_1x.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='ShuffleNetV1', groups=3),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=960,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v2_1x.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v2_1x.py
new file mode 100644
index 0000000000..190800e343
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v2_1x.py
@@ -0,0 +1,12 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='ShuffleNetV2', widen_factor=1.0),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_224.py
new file mode 100644
index 0000000000..e16b4e6099
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_224.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer', arch='base', img_size=224, drop_path_rate=0.5),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
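`train_cfg` mixes every batch with either Mixup or CutMix, each drawn with probability 0.5. A minimal sketch of the Mixup half, assuming one-hot targets (illustrative only, not the mmcls `BatchMixup` implementation):

```python
import torch

def batch_mixup(imgs, onehot_targets, alpha=0.8):
    """Blend a batch with a shuffled copy of itself (Zhang et al., 2018)."""
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    index = torch.randperm(imgs.size(0))
    mixed_imgs = lam * imgs + (1 - lam) * imgs[index]
    mixed_targets = lam * onehot_targets + (1 - lam) * onehot_targets[index]
    return mixed_imgs, mixed_targets
```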
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_384.py
new file mode 100644
index 0000000000..ce78981fb0
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_384.py
@@ -0,0 +1,16 @@
+# model settings
+# Only for evaluation
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer',
+ arch='base',
+ img_size=384,
+ stage_cfgs=dict(block_cfgs=dict(window_size=12))),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_224.py
new file mode 100644
index 0000000000..747d00e44d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_224.py
@@ -0,0 +1,12 @@
+# model settings
+# Only for evaluation
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='SwinTransformer', arch='large', img_size=224),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_384.py
new file mode 100644
index 0000000000..7026f81a31
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_384.py
@@ -0,0 +1,16 @@
+# model settings
+# Only for evaluation
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer',
+ arch='large',
+ img_size=384,
+ stage_cfgs=dict(block_cfgs=dict(window_size=12))),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=1536,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/small_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/small_224.py
new file mode 100644
index 0000000000..78739866f9
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/small_224.py
@@ -0,0 +1,23 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer', arch='small', img_size=224,
+ drop_path_rate=0.3),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/tiny_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/tiny_224.py
new file mode 100644
index 0000000000..2d68d66b50
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/tiny_224.py
@@ -0,0 +1,22 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='SwinTransformer', arch='tiny', img_size=224, drop_path_rate=0.2),
+ neck=dict(type='GlobalAveragePooling'),
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=768,
+ init_cfg=None, # suppress the default init_cfg of LinearClsHead.
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ cal_acc=False),
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ],
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+ dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+ ]))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-14.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-14.py
new file mode 100644
index 0000000000..91dbb67621
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-14.py
@@ -0,0 +1,41 @@
+# model settings
+embed_dims = 384
+num_classes = 1000
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='T2T_ViT',
+ img_size=224,
+ in_channels=3,
+ embed_dims=embed_dims,
+ t2t_cfg=dict(
+ token_dims=64,
+ use_performer=False,
+ ),
+ num_layers=14,
+ layer_cfgs=dict(
+ num_heads=6,
+ feedforward_channels=3 * embed_dims, # mlp_ratio = 3
+ ),
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=.02),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=num_classes,
+ in_channels=embed_dims,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ mode='original',
+ ),
+ topk=(1, 5),
+ init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)),
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes),
+ dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes),
+ ]))
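The `# mlp_ratio = 3` comment can be sanity-checked directly from the constants above; with six heads, each attention head is 64-dimensional:

```python
embed_dims, num_heads, mlp_ratio = 384, 6, 3
head_dim = embed_dims // num_heads             # 64 per attention head
feedforward_channels = mlp_ratio * embed_dims  # 1152, as passed to layer_cfgs
```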
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-19.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-19.py
new file mode 100644
index 0000000000..8ab139d679
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-19.py
@@ -0,0 +1,41 @@
+# model settings
+embed_dims = 448
+num_classes = 1000
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='T2T_ViT',
+ img_size=224,
+ in_channels=3,
+ embed_dims=embed_dims,
+ t2t_cfg=dict(
+ token_dims=64,
+ use_performer=False,
+ ),
+ num_layers=19,
+ layer_cfgs=dict(
+ num_heads=7,
+ feedforward_channels=3 * embed_dims, # mlp_ratio = 3
+ ),
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=.02),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=num_classes,
+ in_channels=embed_dims,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ mode='original',
+ ),
+ topk=(1, 5),
+ init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)),
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes),
+ dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes),
+ ]))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-24.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-24.py
new file mode 100644
index 0000000000..5990960ab4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-24.py
@@ -0,0 +1,41 @@
+# model settings
+embed_dims = 512
+num_classes = 1000
+
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='T2T_ViT',
+ img_size=224,
+ in_channels=3,
+ embed_dims=embed_dims,
+ t2t_cfg=dict(
+ token_dims=64,
+ use_performer=False,
+ ),
+ num_layers=24,
+ layer_cfgs=dict(
+ num_heads=8,
+ feedforward_channels=3 * embed_dims, # mlp_ratio = 3
+ ),
+ drop_path_rate=0.1,
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=.02),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.),
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=num_classes,
+ in_channels=embed_dims,
+ loss=dict(
+ type='LabelSmoothLoss',
+ label_smooth_val=0.1,
+ mode='original',
+ ),
+ topk=(1, 5),
+ init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)),
+ train_cfg=dict(augments=[
+ dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes),
+ dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes),
+ ]))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/tnt_s_patch16_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/tnt_s_patch16_224.py
new file mode 100644
index 0000000000..5e13d07828
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/tnt_s_patch16_224.py
@@ -0,0 +1,29 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='TNT',
+ arch='s',
+ img_size=224,
+ patch_size=16,
+ in_channels=3,
+ ffn_ratio=4,
+ qkv_bias=False,
+ drop_rate=0.,
+ attn_drop_rate=0.,
+ drop_path_rate=0.1,
+ first_stride=4,
+ num_fcs=2,
+ init_cfg=[
+ dict(type='TruncNormal', layer='Linear', std=.02),
+ dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ]),
+ neck=None,
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=384,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
+ topk=(1, 5),
+ init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)))
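`LabelSmoothLoss` with `mode='original'` softens the one-hot targets before cross-entropy. A sketch of the target transform, assuming `K` classes (the formula from Szegedy et al.'s Inception-v3 paper, not the mmcls code itself):

```python
import torch
import torch.nn.functional as F

def smooth_targets(labels, num_classes, eps=0.1):
    """'original' smoothing: (1 - eps) * one_hot + eps / K."""
    onehot = F.one_hot(labels, num_classes).float()
    return (1 - eps) * onehot + eps / num_classes
```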
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11.py
new file mode 100644
index 0000000000..2b6ee1426a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11.py
@@ -0,0 +1,10 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VGG', depth=11, num_classes=1000),
+ neck=None,
+ head=dict(
+ type='ClsHead',
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11bn.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11bn.py
new file mode 100644
index 0000000000..cb4c64e95a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11bn.py
@@ -0,0 +1,11 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VGG', depth=11, norm_cfg=dict(type='BN'), num_classes=1000),
+ neck=None,
+ head=dict(
+ type='ClsHead',
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13.py
new file mode 100644
index 0000000000..a9389100a6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13.py
@@ -0,0 +1,10 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VGG', depth=13, num_classes=1000),
+ neck=None,
+ head=dict(
+ type='ClsHead',
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13bn.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13bn.py
new file mode 100644
index 0000000000..b12173b51b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13bn.py
@@ -0,0 +1,11 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VGG', depth=13, norm_cfg=dict(type='BN'), num_classes=1000),
+ neck=None,
+ head=dict(
+ type='ClsHead',
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16.py
new file mode 100644
index 0000000000..93ce864fac
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16.py
@@ -0,0 +1,10 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VGG', depth=16, num_classes=1000),
+ neck=None,
+ head=dict(
+ type='ClsHead',
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16bn.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16bn.py
new file mode 100644
index 0000000000..765e34f636
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16bn.py
@@ -0,0 +1,11 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VGG', depth=16, norm_cfg=dict(type='BN'), num_classes=1000),
+ neck=None,
+ head=dict(
+ type='ClsHead',
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19.py
new file mode 100644
index 0000000000..6f4ab061b2
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19.py
@@ -0,0 +1,10 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VGG', depth=19, num_classes=1000),
+ neck=None,
+ head=dict(
+ type='ClsHead',
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19bn.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19bn.py
new file mode 100644
index 0000000000..c468b5dea2
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19bn.py
@@ -0,0 +1,11 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VGG', depth=19, norm_cfg=dict(type='BN'), num_classes=1000),
+ neck=None,
+ head=dict(
+ type='ClsHead',
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p16.py
new file mode 100644
index 0000000000..bb42bed5fa
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p16.py
@@ -0,0 +1,25 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='b',
+ img_size=224,
+ patch_size=16,
+ drop_rate=0.1,
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(
+ type='LabelSmoothLoss', label_smooth_val=0.1,
+ mode='classy_vision'),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p32.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p32.py
new file mode 100644
index 0000000000..ad550ef9b9
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p32.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='b',
+ img_size=224,
+ patch_size=32,
+ drop_rate=0.1,
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=768,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p16.py
new file mode 100644
index 0000000000..9716230456
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p16.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='l',
+ img_size=224,
+ patch_size=16,
+ drop_rate=0.1,
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p32.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p32.py
new file mode 100644
index 0000000000..f9491bb561
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p32.py
@@ -0,0 +1,24 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(
+ type='VisionTransformer',
+ arch='l',
+ img_size=224,
+ patch_size=32,
+ drop_rate=0.1,
+ init_cfg=[
+ dict(
+ type='Kaiming',
+ layer='Conv2d',
+ mode='fan_in',
+ nonlinearity='linear')
+ ]),
+ neck=None,
+ head=dict(
+ type='VisionTransformerClsHead',
+ num_classes=1000,
+ in_channels=1024,
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ topk=(1, 5),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/cifar10_bs128.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/cifar10_bs128.py
new file mode 100644
index 0000000000..f134dbce3b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/cifar10_bs128.py
@@ -0,0 +1,6 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=[100, 150])
+runner = dict(type='EpochBasedRunner', max_epochs=200)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py
new file mode 100644
index 0000000000..1a523e44dd
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py
@@ -0,0 +1,30 @@
+paramwise_cfg = dict(
+ norm_decay_mult=0.0,
+ bias_decay_mult=0.0,
+ custom_keys={
+ '.absolute_pos_embed': dict(decay_mult=0.0),
+ '.relative_position_bias_table': dict(decay_mult=0.0)
+ })
+
+# With a batch size of 128 per GPU on 8 GPUs,
+# lr = 5e-4 * 128 * 8 / 512 = 0.001
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4 * 128 * 8 / 512,
+ weight_decay=0.05,
+ eps=1e-8,
+ betas=(0.9, 0.999),
+ paramwise_cfg=paramwise_cfg)
+optimizer_config = dict(grad_clip=dict(max_norm=5.0))
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ by_epoch=False,
+ min_lr_ratio=1e-2,
+ warmup='linear',
+ warmup_ratio=1e-3,
+ warmup_iters=20 * 1252,
+ warmup_by_epoch=False)
+
+runner = dict(type='EpochBasedRunner', max_epochs=300)
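Both magic numbers in this schedule follow from ImageNet-1k sizes under the 128 x 8 setup noted above; a quick derivation (the 1,281,167-image train split is an assumption about the dataset):

```python
images, batch = 1_281_167, 128 * 8
iters_per_epoch = -(-images // batch)  # ceil division -> 1252
lr = 5e-4 * batch / 512                # linear scaling rule -> 0.001
warmup_iters = 20 * iters_per_epoch    # 20 warmup epochs -> 25040
```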
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_coslr.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_coslr.py
new file mode 100644
index 0000000000..ee84e7a6f6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_coslr.py
@@ -0,0 +1,12 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.8, momentum=0.9, weight_decay=5e-5)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=0.1,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py
new file mode 100644
index 0000000000..99fbdda9f5
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py
@@ -0,0 +1,17 @@
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.5,
+ momentum=0.9,
+ weight_decay=0.00004,
+ paramwise_cfg=dict(norm_decay_mult=0))
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='poly',
+ min_lr=0,
+ by_epoch=False,
+ warmup='constant',
+ warmup_iters=5000,
+)
+runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048.py
new file mode 100644
index 0000000000..93fdebfdd1
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048.py
@@ -0,0 +1,12 @@
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='step',
+ warmup='linear',
+ warmup_iters=2500,
+ warmup_ratio=0.25,
+ step=[30, 60, 90])
+runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_AdamW.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_AdamW.py
new file mode 100644
index 0000000000..6d4f2081b9
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_AdamW.py
@@ -0,0 +1,20 @@
+# optimizer
+# In ClassyVision, the lr is set to 0.003 for bs4096.
+# In this implementation (bs2048), lr = 0.003 / 4096 * (32 samples/GPU * 64 GPUs) = 0.0015
+optimizer = dict(type='AdamW', lr=0.0015, weight_decay=0.3)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0))
+
+# specific to vit pretrain
+paramwise_cfg = dict(
+ custom_keys={
+ '.backbone.cls_token': dict(decay_mult=0.0),
+ '.backbone.pos_embed': dict(decay_mult=0.0)
+ })
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=10000,
+ warmup_ratio=1e-4)
+runner = dict(type='EpochBasedRunner', max_epochs=300)
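The `paramwise_cfg` above routes `cls_token` and `pos_embed` into a zero-weight-decay group. A hand-rolled equivalent in plain PyTorch, sketching the effect rather than mmcv's actual optimizer constructor:

```python
import torch

def build_adamw(model, lr=0.0015, weight_decay=0.3):
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if name.endswith(('cls_token', 'pos_embed')):
            no_decay.append(param)  # decay_mult = 0.0
        else:
            decay.append(param)
    return torch.optim.AdamW(
        [dict(params=decay, weight_decay=weight_decay),
         dict(params=no_decay, weight_decay=0.0)], lr=lr)
```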
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_coslr.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_coslr.py
new file mode 100644
index 0000000000..b9e77f2c6a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_coslr.py
@@ -0,0 +1,12 @@
+# optimizer
+optimizer = dict(
+ type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=2500,
+ warmup_ratio=0.25)
+runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256.py
new file mode 100644
index 0000000000..3b5d19847a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256.py
@@ -0,0 +1,6 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=[30, 60, 90])
+runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_140e.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_140e.py
new file mode 100644
index 0000000000..caba1577c7
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_140e.py
@@ -0,0 +1,6 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=[40, 80, 120])
+runner = dict(type='EpochBasedRunner', max_epochs=140)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py
new file mode 100644
index 0000000000..49456b2cd0
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py
@@ -0,0 +1,11 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=25025,
+ warmup_ratio=0.25)
+runner = dict(type='EpochBasedRunner', max_epochs=200)
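The oddly specific `warmup_iters=25025` is five epochs of linear warmup at batch size 256, assuming the usual 1,281,167-image ImageNet train split:

```python
iters_per_epoch = -(-1_281_167 // 256)  # ceil division -> 5005
warmup_iters = 5 * iters_per_epoch      # 5 epochs -> 25025
```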
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_coslr.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_coslr.py
new file mode 100644
index 0000000000..779b4792ed
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_coslr.py
@@ -0,0 +1,6 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
+runner = dict(type='EpochBasedRunner', max_epochs=100)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_epochstep.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_epochstep.py
new file mode 100644
index 0000000000..2347a04354
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_epochstep.py
@@ -0,0 +1,6 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=0.00004)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', gamma=0.98, step=1)
+runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs4096_AdamW.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs4096_AdamW.py
new file mode 100644
index 0000000000..859cf4b23a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs4096_AdamW.py
@@ -0,0 +1,18 @@
+# optimizer
+optimizer = dict(type='AdamW', lr=0.003, weight_decay=0.3)
+optimizer_config = dict(grad_clip=dict(max_norm=1.0))
+
+# specific to vit pretrain
+paramwise_cfg = dict(
+ custom_keys={
+ '.backbone.cls_token': dict(decay_mult=0.0),
+ '.backbone.pos_embed': dict(decay_mult=0.0)
+ })
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=10000,
+ warmup_ratio=1e-4)
+runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/README.md
new file mode 100644
index 0000000000..2ef4ea13db
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/README.md
@@ -0,0 +1,20 @@
+# Mixed Precision Training
+
+## Introduction
+
+
+
+```latex
+@article{micikevicius2017mixed,
+ title={Mixed precision training},
+ author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others},
+ journal={arXiv preprint arXiv:1710.03740},
+ year={2017}
+}
+```
+
+## Results and models
+
+| Model | Params(M) | Flops(G) | Mem (GB) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:| :---------:|:--------:|
+| ResNet-50 | 25.56 | 4.12 | 1.9 | 76.30 | 93.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/fp16/resnet50_b32x8_fp16_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth) &#124; [log](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.log.json) |
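The `loss_scale='dynamic'` variant used by the model above behaves like PyTorch's `torch.cuda.amp.GradScaler`: scale the loss up, skip steps whose gradients overflow, and adapt the scale over time. A minimal sketch in plain PyTorch (illustrative; not the mmcls FP16 hook itself):

```python
import torch
import torch.nn.functional as F

scaler = torch.cuda.amp.GradScaler()  # dynamic loss scaling

def train_step(model, optimizer, imgs, labels):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():    # run the forward pass in fp16
        loss = F.cross_entropy(model(imgs), labels)
    scaler.scale(loss).backward()      # backprop the scaled loss
    scaler.step(optimizer)             # skipped if gradients overflowed
    scaler.update()                    # grow/shrink the scale factor
    return loss.detach()
```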
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/metafile.yml
new file mode 100644
index 0000000000..20b42840d5
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/metafile.yml
@@ -0,0 +1,35 @@
+Collections:
+ - Name: FP16
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ - Mixed Precision Training
+ Training Resources: 8x V100 GPUs
+ Paper:
+ URL: https://arxiv.org/abs/1710.03740
+ Title: Mixed Precision Training
+ README: configs/fp16/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/a41cb2fa938d957101cc446e271486206188bf5b/mmcls/core/fp16/hooks.py#L13
+ Version: v0.15.0
+
+Models:
+ - Name: resnet50_b32x8_fp16_dynamic_imagenet
+ Metadata:
+ FLOPs: 4120000000
+ Parameters: 25560000
+ Epochs: 100
+ Batch Size: 256
+ Architecture:
+ - ResNet
+ In Collection: FP16
+ Results:
+ - Task: Image Classification
+ Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 76.30
+ Top 5 Accuracy: 93.07
+ Weights: https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth
+ Config: configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py
new file mode 100644
index 0000000000..35b4ff5423
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py
@@ -0,0 +1,4 @@
+_base_ = ['../resnet/resnet50_b32x8_imagenet.py']
+
+# fp16 settings
+fp16 = dict(loss_scale='dynamic')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_imagenet.py
new file mode 100644
index 0000000000..fbab0cc1ec
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_imagenet.py
@@ -0,0 +1,4 @@
+_base_ = ['../resnet/resnet50_b32x8_imagenet.py']
+
+# fp16 settings
+fp16 = dict(loss_scale=512.)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/README.md
new file mode 100644
index 0000000000..49647ce4a1
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/README.md
@@ -0,0 +1,19 @@
+# Backpropagation Applied to Handwritten Zip Code Recognition
+
+
+## Introduction
+
+
+
+```latex
+@ARTICLE{6795724,
+ author={Y. {LeCun} and B. {Boser} and J. S. {Denker} and D. {Henderson} and R. E. {Howard} and W. {Hubbard} and L. D. {Jackel}},
+ journal={Neural Computation},
+ title={Backpropagation Applied to Handwritten Zip Code Recognition},
+ year={1989},
+ volume={1},
+ number={4},
+ pages={541-551},
+  doi={10.1162/neco.1989.1.4.541}
+}
+```
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/lenet5_mnist.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/lenet5_mnist.py
new file mode 100644
index 0000000000..7286b798ff
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/lenet5_mnist.py
@@ -0,0 +1,59 @@
+# model settings
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='LeNet5', num_classes=10),
+ neck=None,
+ head=dict(
+ type='ClsHead',
+ loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+ ))
+# dataset settings
+dataset_type = 'MNIST'
+img_norm_cfg = dict(mean=[33.46], std=[78.87], to_rgb=True)
+train_pipeline = [
+ dict(type='Resize', size=32),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label']),
+]
+test_pipeline = [
+ dict(type='Resize', size=32),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+]
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type, data_prefix='data/mnist', pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type, data_prefix='data/mnist', pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type, data_prefix='data/mnist', pipeline=test_pipeline))
+evaluation = dict(
+ interval=5, metric='accuracy', metric_options={'topk': (1, )})
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=[15])
+# checkpoint saving
+checkpoint_config = dict(interval=1)
+# yapf:disable
+log_config = dict(
+ interval=150,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ # dict(type='TensorboardLoggerHook')
+ ])
+# yapf:enable
+# runtime settings
+runner = dict(type='EpochBasedRunner', max_epochs=5)
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/mnist/'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
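Any of these configs can be exercised through mmcls' high-level API. A usage sketch; `init_model`/`inference_model` are the standard mmcls entry points, while the checkpoint and image paths here are placeholders:

```python
from mmcls.apis import inference_model, init_model

model = init_model('configs/lenet/lenet5_mnist.py',
                   checkpoint=None, device='cpu')     # random weights
result = inference_model(model, 'path/to/digit.png')  # hypothetical image
print(result)  # prediction dict (label, score, class name)
```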
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/README.md
new file mode 100644
index 0000000000..75008d3cad
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/README.md
@@ -0,0 +1,27 @@
+# MobileNetV2: Inverted Residuals and Linear Bottlenecks
+
+
+## Introduction
+
+
+
+```latex
+@INPROCEEDINGS{8578572,
+ author={M. {Sandler} and A. {Howard} and M. {Zhu} and A. {Zhmoginov} and L. {Chen}},
+ booktitle={2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+ title={MobileNetV2: Inverted Residuals and Linear Bottlenecks},
+ year={2018},
+ volume={},
+ number={},
+ pages={4510-4520},
+  doi={10.1109/CVPR.2018.00474}
+}
+```
+
+## Results and models
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:|
+| MobileNet V2 | 3.5 | 0.319 | 71.86 | 90.42 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth) &#124; [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.log.json) |
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/metafile.yml
new file mode 100644
index 0000000000..3765f0ca85
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/metafile.yml
@@ -0,0 +1,34 @@
+Collections:
+ - Name: MobileNet V2
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 300
+ Batch Size: 256
+ Architecture:
+ - MobileNet V2
+ Paper:
+ URL: https://arxiv.org/abs/1801.04381
+ Title: "MobileNetV2: Inverted Residuals and Linear Bottlenecks"
+ README: configs/mobilenet_v2/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/mobilenet_v2.py#L101
+ Version: v0.15.0
+
+Models:
+ - Name: mobilenet_v2_b32x8_imagenet
+ Metadata:
+ FLOPs: 319000000
+ Parameters: 3500000
+ In Collection: MobileNet V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 71.86
+ Top 5 Accuracy: 90.42
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth
+ Config: configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
new file mode 100644
index 0000000000..afd2d9795a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/mobilenet_v2_1x.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_epochstep.py',
+ '../_base_/default_runtime.py'
+]
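The four `_base_` files are merged recursively by mmcv's `Config`, so the resolved config can be inspected as a single object (the path assumes you run from the repo root):

```python
from mmcv import Config

cfg = Config.fromfile('configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py')
# Keys from all four _base_ files are visible on one object:
print(cfg.model.backbone.type)  # 'MobileNetV2'
print(cfg.optimizer)            # SGD settings from imagenet_bs256_epochstep
print(cfg.runner.max_epochs)    # 300
```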
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/README.md
new file mode 100644
index 0000000000..2bb95508da
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/README.md
@@ -0,0 +1,31 @@
+# Searching for MobileNetV3
+
+
+## Introduction
+
+
+
+```latex
+@inproceedings{Howard_2019_ICCV,
+ author = {Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and Le, Quoc V. and Adam, Hartwig},
+ title = {Searching for MobileNetV3},
+ booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
+ month = {October},
+ year = {2019}
+}
+```
+
+## Pre-trained models
+
+The pre-trained models are converted from [torchvision](https://pytorch.org/vision/stable/_modules/torchvision/models/mobilenetv3.html).
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:--------:|
+| MobileNetV3-Large | 5.48 | 0.23 | 74.04 | 91.34 | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth)|
+| MobileNetV3-Small | 2.54 | 0.06 | 67.66 | 87.41 | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth)|
+
+## Results and models
+
+Results and models will be added soon.
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/metafile.yml
new file mode 100644
index 0000000000..c978fd8f42
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/metafile.yml
@@ -0,0 +1,42 @@
+Collections:
+ - Name: MobileNet V3
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - RMSprop with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 600
+ Batch Size: 1024
+ Architecture:
+ - MobileNet V3
+ Paper: https://arxiv.org/abs/1905.02244
+ README: configs/mobilenet_v3/README.md
+
+Models:
+ - Name: mobilenet_v3_small_imagenet
+ Metadata:
+ FLOPs: 60000000
+ Parameters: 2540000
+ In Collection: MobileNet V3
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 67.66
+ Top 5 Accuracy: 87.41
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth
+ Config: configs/mobilenet_v3/mobilenet_v3_small_imagenet.py
+ - Name: mobilenet_v3_large_imagenet
+ Metadata:
+ FLOPs: 230000000
+ Parameters: 5480000
+ In Collection: MobileNet V3
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 74.04
+ Top 5 Accuracy: 91.34
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth
+ Config: configs/mobilenet_v3/mobilenet_v3_large_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py
new file mode 100644
index 0000000000..985ef520d5
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py
@@ -0,0 +1,158 @@
+# Refer to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification
+# ----------------------------
+# -[x] auto_augment='imagenet'
+# -[x] batch_size=128 (per gpu)
+# -[x] epochs=600
+# -[x] opt='rmsprop'
+# -[x] lr=0.064
+# -[x] eps=0.0316
+# -[x] alpha=0.9
+# -[x] weight_decay=1e-05
+# -[x] momentum=0.9
+# -[x] lr_gamma=0.973
+# -[x] lr_step_size=2
+# -[x] nproc_per_node=8
+# -[x] random_erase=0.2
+# -[x] workers=16 (workers_per_gpu)
+# - modified: RandomErasing uses RE-M (mean fill) instead of RE-0 (zero fill)
+
+_base_ = [
+ '../_base_/models/mobilenet_v3_large_imagenet.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/default_runtime.py'
+]
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+policies = [
+ [
+ dict(type='Posterize', bits=4, prob=0.4),
+ dict(type='Rotate', angle=30., prob=0.6)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+ [
+ dict(type='Posterize', bits=5, prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 6, prob=0.6),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Posterize', bits=6, prob=0.8),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='Rotate', angle=10., prob=0.2),
+ dict(type='Solarize', thr=256 / 9, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0., prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.0),
+ dict(type='Equalize', prob=0.8)],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0.2, prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0.8, prob=0.8),
+ dict(type='Solarize', thr=256 / 9 * 2, prob=0.8)
+ ],
+ [
+ dict(type='Sharpness', magnitude=0.7, prob=0.4),
+ dict(type='Invert', prob=0.6)
+ ],
+ [
+ dict(
+ type='Shear',
+ magnitude=0.3 / 9 * 5,
+ prob=0.6,
+ direction='horizontal'),
+ dict(type='Equalize', prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+]
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='AutoAugment', policies=policies),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.2,
+ mode='const',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean']),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=4,
+ train=dict(pipeline=train_pipeline))
+evaluation = dict(interval=10, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='RMSprop',
+ lr=0.064,
+ alpha=0.9,
+ momentum=0.9,
+ eps=0.0316,
+ weight_decay=1e-5)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=2, gamma=0.973, by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=600)
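With `step=2` and `gamma=0.973`, the learning rate decays by 2.7% every two epochs. A back-of-the-envelope check of the resulting schedule (no warmup is configured):

```python
base_lr, gamma, step = 0.064, 0.973, 2

def lr_at(epoch):
    return base_lr * gamma ** (epoch // step)

print(lr_at(0))    # 0.064
print(lr_at(300))  # ~1.1e-3  (0.973 ** 150)
print(lr_at(599))  # ~1.8e-5  (0.973 ** 299)
```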
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_cifar.py
new file mode 100644
index 0000000000..2b5c2b1f07
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_cifar.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/mobilenet_v3_small_cifar.py',
+ '../_base_/datasets/cifar10_bs16.py',
+ '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
+]
+
+lr_config = dict(policy='step', step=[120, 170])
+runner = dict(type='EpochBasedRunner', max_epochs=200)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py
new file mode 100644
index 0000000000..2612166fd2
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py
@@ -0,0 +1,158 @@
+# Refer to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification
+# ----------------------------
+# -[x] auto_augment='imagenet'
+# -[x] batch_size=128 (per gpu)
+# -[x] epochs=600
+# -[x] opt='rmsprop'
+# -[x] lr=0.064
+# -[x] eps=0.0316
+# -[x] alpha=0.9
+# -[x] weight_decay=1e-05
+# -[x] momentum=0.9
+# -[x] lr_gamma=0.973
+# -[x] lr_step_size=2
+# -[x] nproc_per_node=8
+# -[x] random_erase=0.2
+# -[x] workers=16 (workers_per_gpu)
+# - modified: RandomErasing uses RE-M (mean fill) instead of RE-0 (zero fill)
+
+_base_ = [
+ '../_base_/models/mobilenet_v3_small_imagenet.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/default_runtime.py'
+]
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+policies = [
+ [
+ dict(type='Posterize', bits=4, prob=0.4),
+ dict(type='Rotate', angle=30., prob=0.6)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+ [
+ dict(type='Posterize', bits=5, prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 6, prob=0.6),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Posterize', bits=6, prob=0.8),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='Rotate', angle=10., prob=0.2),
+ dict(type='Solarize', thr=256 / 9, prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.6),
+ dict(type='Posterize', bits=5, prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0., prob=0.4)
+ ],
+ [
+ dict(type='Rotate', angle=30., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [dict(type='Equalize', prob=0.0),
+ dict(type='Equalize', prob=0.8)],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [
+ dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
+ dict(type='ColorTransform', magnitude=0.2, prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0.8, prob=0.8),
+ dict(type='Solarize', thr=256 / 9 * 2, prob=0.8)
+ ],
+ [
+ dict(type='Sharpness', magnitude=0.7, prob=0.4),
+ dict(type='Invert', prob=0.6)
+ ],
+ [
+ dict(
+ type='Shear',
+ magnitude=0.3 / 9 * 5,
+ prob=0.6,
+ direction='horizontal'),
+ dict(type='Equalize', prob=1.)
+ ],
+ [
+ dict(type='ColorTransform', magnitude=0., prob=0.4),
+ dict(type='Equalize', prob=0.6)
+ ],
+ [
+ dict(type='Equalize', prob=0.4),
+ dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
+ ],
+ [
+ dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
+ dict(type='AutoContrast', prob=0.6)
+ ],
+ [dict(type='Invert', prob=0.6),
+ dict(type='Equalize', prob=1.)],
+ [
+ dict(type='ColorTransform', magnitude=0.4, prob=0.6),
+ dict(type='Contrast', magnitude=0.8, prob=1.)
+ ],
+ [dict(type='Equalize', prob=0.8),
+ dict(type='Equalize', prob=0.6)],
+]
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='AutoAugment', policies=policies),
+ dict(
+ type='RandomErasing',
+ erase_prob=0.2,
+ mode='const',
+ min_area_ratio=0.02,
+ max_area_ratio=1 / 3,
+ fill_color=img_norm_cfg['mean']),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+data = dict(
+ samples_per_gpu=128,
+ workers_per_gpu=4,
+ train=dict(pipeline=train_pipeline))
+evaluation = dict(interval=10, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='RMSprop',
+ lr=0.064,
+ alpha=0.9,
+ momentum=0.9,
+ eps=0.0316,
+ weight_decay=1e-5)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=2, gamma=0.973, by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=600)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/README.md
new file mode 100644
index 0000000000..10c42d4289
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/README.md
@@ -0,0 +1,38 @@
+# Designing Network Design Spaces
+
+
+## Introduction
+
+
+
+```latex
+@article{radosavovic2020designing,
+ title={Designing Network Design Spaces},
+ author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár},
+ year={2020},
+ eprint={2003.13678},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
+
+## Pre-trained models
+
+The pre-trained models are converted from the [model zoo of pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md).
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:--------:|
+| RegNetX-400MF | 5.16 | 0.41 | 72.55 | 90.91 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-400MF-0db9f35c.pth)|
+| RegNetX-800MF | 7.26 | 0.81 | 75.21 | 92.37 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-800MF-4f9d1e8a.pth)|
+| RegNetX-1.6GF | 9.19 | 1.63 | 77.04 | 93.51 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-1.6GF-cfb32375.pth)|
+| RegNetX-3.2GF | 15.3 | 3.21 | 78.26 | 94.20 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-3.2GF-82c43fd5.pth)|
+| RegNetX-4.0GF | 22.12 | 4.0 | 78.72 | 94.22 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-4.0GF-ef8bb32c.pth)|
+| RegNetX-6.4GF | 26.21 | 6.51 | 79.22 | 94.61 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-6.4GF-6888c0ea.pth)|
+| RegNetX-8.0GF | 39.57 | 8.03 | 79.31 | 94.57 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-8.0GF-cb4c77ec.pth)|
+| RegNetX-12GF | 46.11 | 12.15 | 79.91 | 94.78 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-12GF-0574538f.pth)|
+
+## Results and models
+
+To be added.
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_1.6gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_1.6gf_b32x8_imagenet.py
new file mode 100644
index 0000000000..cfa956ff78
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_1.6gf_b32x8_imagenet.py
@@ -0,0 +1,51 @@
+_base_ = [
+ '../_base_/models/regnet/regnetx_1.6gf.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+
+img_norm_cfg = dict(
+    # Mean and std values used by pycls when training RegNets
+ mean=[103.53, 116.28, 123.675],
+ std=[57.375, 57.12, 58.395],
+ to_rgb=False)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_12gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_12gf_b32x8_imagenet.py
new file mode 100644
index 0000000000..17796a4b78
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_12gf_b32x8_imagenet.py
@@ -0,0 +1,51 @@
+_base_ = [
+ '../_base_/models/regnet/regnetx_12gf.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+
+img_norm_cfg = dict(
+    # Mean and std values used by pycls when training RegNets
+ mean=[103.53, 116.28, 123.675],
+ std=[57.375, 57.12, 58.395],
+ to_rgb=False)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_3.2gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_3.2gf_b32x8_imagenet.py
new file mode 100644
index 0000000000..b772c78604
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_3.2gf_b32x8_imagenet.py
@@ -0,0 +1,51 @@
+_base_ = [
+ '../_base_/models/regnet/regnetx_3.2gf.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+
+img_norm_cfg = dict(
+    # Mean and std values used by pycls when training RegNets
+ mean=[103.53, 116.28, 123.675],
+ std=[57.375, 57.12, 58.395],
+ to_rgb=False)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_4.0gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_4.0gf_b32x8_imagenet.py
new file mode 100644
index 0000000000..98e6c53b88
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_4.0gf_b32x8_imagenet.py
@@ -0,0 +1,51 @@
+_base_ = [
+ '../_base_/models/regnet/regnetx_4.0gf.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+
+img_norm_cfg = dict(
+    # Mean and std values used by pycls when training RegNets
+ mean=[103.53, 116.28, 123.675],
+ std=[57.375, 57.12, 58.395],
+ to_rgb=False)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_400mf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_400mf_b32x8_imagenet.py
new file mode 100644
index 0000000000..88ccec943d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_400mf_b32x8_imagenet.py
@@ -0,0 +1,51 @@
+_base_ = [
+ '../_base_/models/regnet/regnetx_400mf.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+
+img_norm_cfg = dict(
+    # Mean and std values used by pycls when training RegNets
+ mean=[103.53, 116.28, 123.675],
+ std=[57.375, 57.12, 58.395],
+ to_rgb=False)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_6.4gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_6.4gf_b32x8_imagenet.py
new file mode 100644
index 0000000000..4e5e36a07d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_6.4gf_b32x8_imagenet.py
@@ -0,0 +1,51 @@
+_base_ = [
+ '../_base_/models/regnet/regnetx_6.4gf.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+
+img_norm_cfg = dict(
+    # Mean and std values used by pycls when training RegNets
+ mean=[103.53, 116.28, 123.675],
+ std=[57.375, 57.12, 58.395],
+ to_rgb=False)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_8.0gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_8.0gf_b32x8_imagenet.py
new file mode 100644
index 0000000000..37d7c8fbfb
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_8.0gf_b32x8_imagenet.py
@@ -0,0 +1,51 @@
+_base_ = [
+ '../_base_/models/regnet/regnetx_8.0gf.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+
+img_norm_cfg = dict(
+    # Mean and std values used by pycls when training RegNets
+ mean=[103.53, 116.28, 123.675],
+ std=[57.375, 57.12, 58.395],
+ to_rgb=False)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_800mf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_800mf_b32x8_imagenet.py
new file mode 100644
index 0000000000..3db65b36ef
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_800mf_b32x8_imagenet.py
@@ -0,0 +1,51 @@
+_base_ = [
+ '../_base_/models/regnet/regnetx_800mf.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+
+# dataset settings
+dataset_type = 'ImageNet'
+
+img_norm_cfg = dict(
+    # Mean and std values used by pycls when training RegNets
+ mean=[103.53, 116.28, 123.675],
+ std=[57.375, 57.12, 58.395],
+ to_rgb=False)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(256, -1)),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/README.md
new file mode 100644
index 0000000000..20e8c35b3f
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/README.md
@@ -0,0 +1,51 @@
+# RepVGG: Making VGG-style ConvNets Great Again
+
+
+## Introduction
+
+
+
+```latex
+@inproceedings{ding2021repvgg,
+ title={Repvgg: Making vgg-style convnets great again},
+ author={Ding, Xiaohan and Zhang, Xiangyu and Ma, Ningning and Han, Jungong and Ding, Guiguang and Sun, Jian},
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+ pages={13733--13742},
+ year={2021}
+}
+```
+
+## Pre-trained models
+
+| Model | Epochs | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :---------: | :----: | :-------------------------------: | :-----------------------------: | :-------: | :-------: | :----------------------------------------------------------: | :----------------------------------------------------------: |
+| RepVGG-A0\* | 120 | 9.11(train) \| 8.31 (deploy) | 1.52 (train) \| 1.36 (deploy) | 72.41 | 90.50 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth) |
+| RepVGG-A1\* | 120 | 14.09 (train) \| 12.79 (deploy) | 2.64 (train) \| 2.37 (deploy) | 74.47 | 91.85 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth) |
+| RepVGG-A2\* | 120 | 28.21 (train) \| 25.5 (deploy) | 5.7 (train) \| 5.12 (deploy) | 76.48 | 93.01 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth) |
+| RepVGG-B0\* | 120 | 15.82 (train) \| 14.34 (deploy) | 3.42 (train) \| 3.06 (deploy) | 75.14 | 92.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth) |
+| RepVGG-B1\* | 120 | 57.42 (train) \| 51.83 (deploy) | 13.16 (train) \| 11.82 (deploy) | 78.37 | 94.11 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth) |
+| RepVGG-B1g2\* | 120 | 45.78 (train) \| 41.36 (deploy) | 9.82 (train) \| 8.82 (deploy) | 77.79 | 93.88 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth) |
+| RepVGG-B1g4\* | 120 | 39.97 (train) \| 36.13 (deploy) | 8.15 (train) \| 7.32 (deploy) | 77.58 | 93.84 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth) |
+| RepVGG-B2\* | 120 | 89.02 (train) \| 80.32 (deploy) | 20.46 (train) \| 18.39 (deploy) | 78.78 | 94.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth) |
+| RepVGG-B2g4\* | 200 | 61.76 (train) \| 55.78 (deploy) | 12.63 (train) \| 11.34 (deploy) | 79.38 | 94.68 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth) |
+| RepVGG-B3\* | 200 | 123.09 (train) \| 110.96 (deploy) | 29.17 (train) \| 26.22 (deploy) | 80.52 | 95.26 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth) |
+| RepVGG-B3g4\* | 200 | 83.83 (train) \| 75.63 (deploy) | 17.9 (train) \| 16.08 (deploy) | 80.22 | 95.10 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth) |
+| RepVGG-D2se\* | 200 | 133.33 (train) \| 120.39 (deploy) | 36.56 (train) \| 32.85 (deploy) | 81.81 | 95.94 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth) |
+
+*Models with \* are converted from other repos.*
+
+## Reparameterize RepVGG
+
+The checkpoints provided are all in `train` form. Use the reparameterization tool to convert them to the more efficient `deploy` form, which has fewer parameters and requires less computation.
+
+```bash
+python ./tools/convert_models/reparameterize_repvgg.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH}
+```
+
+`${CFG_PATH}` is the config file, `${SRC_CKPT_PATH}` is the source checkpoint file, and `${TARGET_CKPT_PATH}` is the path for the converted deploy weights.
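+
+Conceptually, the converter folds each block's parallel branches (3x3 conv + BN, 1x1 conv + BN, and an identity BN when shapes allow) into a single 3x3 convolution. A minimal PyTorch sketch of the fusion math for the two conv branches (an illustration of the idea, not the repo's actual tool):
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+def fuse_conv_bn(weight, bn):
+    """Fold a BatchNorm into the preceding bias-free conv."""
+    scale = bn.weight / (bn.running_var + bn.eps).sqrt()
+    return weight * scale.reshape(-1, 1, 1, 1), bn.bias - bn.running_mean * scale
+
+c = 64  # channel count, illustrative
+conv3x3, bn3 = nn.Conv2d(c, c, 3, padding=1, bias=False), nn.BatchNorm2d(c)
+conv1x1, bn1 = nn.Conv2d(c, c, 1, bias=False), nn.BatchNorm2d(c)
+bn3.eval()
+bn1.eval()
+
+# Fuse each branch's BN, zero-pad the 1x1 kernel to 3x3, and sum the
+# branches: the parallel structure collapses into one 3x3 convolution.
+w3, b3 = fuse_conv_bn(conv3x3.weight, bn3)
+w1, b1 = fuse_conv_bn(conv1x1.weight, bn1)
+deploy = nn.Conv2d(c, c, 3, padding=1)
+deploy.weight.data, deploy.bias.data = w3 + F.pad(w1, [1, 1, 1, 1]), b3 + b1
+
+# In eval mode the fused conv reproduces the two-branch output.
+x = torch.randn(1, c, 8, 8)
+assert torch.allclose(deploy(x), bn3(conv3x3(x)) + bn1(conv1x1(x)), atol=1e-5)
+```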
+
+To use the reparameterized RepVGG weights, switch the config to [the deploy config files](./deploy), as below:
+
+```bash
+python ./tools/test.py ${RepVGG_Deploy_CFG} ${CHECKPOINT}
+```
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..20787f286d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..eea0da9c58
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-A1_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..7b0cea7b7d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-A2_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..23a2898ac5
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..24355edac7
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B1_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..579fcc47b9
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B1g2_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..eab5d44037
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B1g4_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..0681f14dc3
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B2_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 0000000000..8f1840145f
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 0000000000..e60b0678a9
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 0000000000..46f187789a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 0000000000..66dff3b6d4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = '../repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(deploy=True))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/metafile.yml
new file mode 100644
index 0000000000..fc3d8ab355
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/metafile.yml
@@ -0,0 +1,208 @@
+Collections:
+ - Name: RepVGG
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - re-parameterization Convolution
+ - VGG-style Neural Network
+ Paper:
+ URL: https://arxiv.org/abs/2101.03697
+ Title: 'RepVGG: Making VGG-style ConvNets Great Again'
+ README: configs/repvgg/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.16.0/mmcls/models/backbones/repvgg.py#L257
+ Version: v0.16.0
+
+Models:
+ - Name: repvgg-A0_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py
+ Metadata:
+ FLOPs: 1520000000
+ Parameters: 9110000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 72.41
+ Top 5 Accuracy: 90.50
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L196
+ - Name: repvgg-A1_4xb64-coslr-120e_in1k
+    In Collection: RepVGG
+ Config: configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py
+ Metadata:
+ FLOPs: 2640000000
+ Parameters: 14090000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 74.47
+ Top 5 Accuracy: 91.85
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L200
+ - Name: repvgg-A2_4xb64-coslr-120e_in1k
+    In Collection: RepVGG
+ Config: configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 5700000000
+      Parameters: 28210000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 76.48
+ Top 5 Accuracy: 93.01
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L204
+ - Name: repvgg-B0_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 3420000000
+      Parameters: 15820000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 75.14
+ Top 5 Accuracy: 92.42
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L208
+ - Name: repvgg-B1_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 13160000000
+      Parameters: 57420000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 78.37
+ Top 5 Accuracy: 94.11
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L212
+ - Name: repvgg-B1g2_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 9820000000
+      Parameters: 45780000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 77.79
+ Top 5 Accuracy: 93.88
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L216
+ - Name: repvgg-B1g4_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 8150000000
+      Parameters: 39970000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 77.58
+ Top 5 Accuracy: 93.84
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L220
+ - Name: repvgg-B2_4xb64-coslr-120e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py
+ Metadata:
+      FLOPs: 20420000000
+      Parameters: 89020000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 78.78
+ Top 5 Accuracy: 94.42
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L225
+ - Name: repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
+ Metadata:
+      FLOPs: 12630000000
+      Parameters: 61760000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 79.38
+ Top 5 Accuracy: 94.68
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L229
+ - Name: repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
+ Metadata:
+      FLOPs: 29170000000
+      Parameters: 123090000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 80.52
+ Top 5 Accuracy: 95.26
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L238
+ - Name: repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
+ Metadata:
+      FLOPs: 17900000000
+      Parameters: 83830000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 80.22
+ Top 5 Accuracy: 95.10
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L238
+ - Name: repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k
+ In Collection: RepVGG
+ Config: configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
+ Metadata:
+      FLOPs: 36560000000
+      Parameters: 133330000
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 81.81
+ Top 5 Accuracy: 95.94
+ Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth
+ Converted From:
+ Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq
+ Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L250
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..a7fd3bbe91
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,8 @@
+_base_ = [
+ '../_base_/models/repvgg-A0_in1k.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
+
+runner = dict(max_epochs=120)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..649020f2c6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='A1'))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..eedaf2d29b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='A2'), head=dict(in_channels=1408))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..b3ce7ea27d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B0'), head=dict(in_channels=1280))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..30adea3dc8
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B1'), head=dict(in_channels=2048))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..2749db8d95
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B1g2'), head=dict(in_channels=2048))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..2647690975
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B1g4'), head=dict(in_channels=2048))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py
new file mode 100644
index 0000000000..4d215567f4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py'
+
+model = dict(backbone=dict(arch='B2'), head=dict(in_channels=2560))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 0000000000..11331cf02f
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(arch='B2g4'))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 0000000000..7b6dc5065d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/repvgg-B3_lbs-mixup_in1k.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_200e_coslr_warmup.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 0000000000..67e3688c5a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(arch='B3g4'))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
new file mode 100644
index 0000000000..d235610f07
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py
@@ -0,0 +1,3 @@
+_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py'
+
+model = dict(backbone=dict(arch='D2se'))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/README.md
new file mode 100644
index 0000000000..befe4ba6d6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/README.md
@@ -0,0 +1,30 @@
+# Res2Net: A New Multi-scale Backbone Architecture
+
+
+## Introduction
+
+
+
+```latex
+@article{gao2019res2net,
+ title={Res2Net: A New Multi-scale Backbone Architecture},
+ author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
+ journal={IEEE TPAMI},
+ year={2021},
+ doi={10.1109/TPAMI.2019.2938758},
+}
+```
+
+## Pre-trained models
+
+The pre-trained models are converted from the [official repo](https://github.com/Res2Net/Res2Net-PretrainedModels).
+
+### ImageNet-1k
+
+| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download |
+|:---------------------:|:-----------:|:---------:|:---------:|:---------:|:---------:|:--------:|
+| Res2Net-50-14w-8s\* | 224x224 | 25.06 | 4.22 | 78.14 | 93.85 | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth)|
+| Res2Net-50-26w-8s\* | 224x224 | 48.40 | 8.39 | 79.20 | 94.36 | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth)|
+| Res2Net-101-26w-4s\* | 224x224 | 45.21 | 8.12 | 79.19 | 94.44 | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth)|
+
+*Models with \* are converted from other repos.*
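+
+The `wXX-sY` naming maps directly onto the backbone arguments (`base_width`, `scales`) in the `_base_` model files. A minimal sketch of building Res2Net-50-14w-8s with mmcls, mirroring `../_base_/models/res2net50-w14-s8.py` (assumes an mmcls version with Res2Net support):
+
+```python
+from mmcls.models import build_classifier
+
+# w14-s8: base_width=14 channels per group, scales=8 feature splits.
+model = build_classifier(
+    dict(
+        type='ImageClassifier',
+        backbone=dict(
+            type='Res2Net', depth=50, scales=8, base_width=14,
+            deep_stem=False, avg_down=False),
+        neck=dict(type='GlobalAveragePooling'),
+        head=dict(
+            type='LinearClsHead', num_classes=1000, in_channels=2048,
+            loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+            topk=(1, 5))))
+model.eval()
+```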
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/metafile.yml
new file mode 100644
index 0000000000..dfcda7329f
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/metafile.yml
@@ -0,0 +1,67 @@
+Collections:
+ - Name: Res2Net
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Architecture:
+ - Batch Normalization
+ - Convolution
+ - Global Average Pooling
+ - ReLU
+ - Res2Net Block
+ Paper:
+ Title: 'Res2Net: A New Multi-scale Backbone Architecture'
+ URL: https://arxiv.org/pdf/1904.01169.pdf
+ README: configs/res2net/README.md
+
+Models:
+ - Name: res2net50-w14-s8_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 4220000000
+ Parameters: 25060000
+ In Collection: Res2Net
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.14
+ Top 5 Accuracy: 93.85
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth
+ Converted From:
+ Weights: https://1drv.ms/u/s!AkxDDnOtroRPdOTqhF8ne_aakDI?e=EVb8Ri
+ Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L221
+ Config: configs/res2net/res2net50-w14-s8_8xb32_in1k.py
+ - Name: res2net50-w26-s8_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 8390000000
+ Parameters: 48400000
+ In Collection: Res2Net
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.20
+ Top 5 Accuracy: 94.36
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth
+ Converted From:
+ Weights: https://1drv.ms/u/s!AkxDDnOtroRPdTrAd_Afzc26Z7Q?e=slYqsR
+ Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L201
+ Config: configs/res2net/res2net50-w26-s8_8xb32_in1k.py
+ - Name: res2net101-w26-s4_3rdparty_8xb32_in1k
+ Metadata:
+ FLOPs: 8120000000
+ Parameters: 45210000
+ In Collection: Res2Net
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.19
+ Top 5 Accuracy: 94.44
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth
+ Converted From:
+ Weights: https://1drv.ms/u/s!AkxDDnOtroRPcJRgTLkahL0cFYw?e=nwbnic
+ Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L181
+ Config: configs/res2net/res2net101-w26-s4_8xb32_in1k.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net101-w26-s4_8xb32_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net101-w26-s4_8xb32_in1k.py
new file mode 100644
index 0000000000..7ebe9e94d6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net101-w26-s4_8xb32_in1k.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/res2net101-w26-s4.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w14-s8_8xb32_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w14-s8_8xb32_in1k.py
new file mode 100644
index 0000000000..56cc02e3b8
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w14-s8_8xb32_in1k.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/res2net50-w14-s8.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w26-s8_8xb32_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w26-s8_8xb32_in1k.py
new file mode 100644
index 0000000000..d7dcbeb916
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w26-s8_8xb32_in1k.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/res2net50-w26-s8.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/README.md
new file mode 100644
index 0000000000..704d24a759
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/README.md
@@ -0,0 +1,17 @@
+# ResNeSt: Split-Attention Networks
+
+
+## Introduction
+
+
+
+```latex
+@misc{zhang2020resnest,
+ title={ResNeSt: Split-Attention Networks},
+ author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. Manmatha and Mu Li and Alexander Smola},
+ year={2020},
+ eprint={2004.08955},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest101_b64x32_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest101_b64x32_imagenet.py
new file mode 100644
index 0000000000..27b1882cf7
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest101_b64x32_imagenet.py
@@ -0,0 +1,181 @@
+_base_ = ['../_base_/models/resnest101.py', '../_base_/default_runtime.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_lighting_cfg = dict(
+ eigval=[55.4625, 4.7940, 1.1475],
+ eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
+ [-0.5836, -0.6948, 0.4203]],
+ alphastd=0.1,
+ to_rgb=True)
+policies = [
+ dict(type='AutoContrast', prob=0.5),
+ dict(type='Equalize', prob=0.5),
+ dict(type='Invert', prob=0.5),
+ dict(
+ type='Rotate',
+ magnitude_key='angle',
+ magnitude_range=(0, 30),
+ pad_val=0,
+ prob=0.5,
+ random_negative_prob=0.5),
+ dict(
+ type='Posterize',
+ magnitude_key='bits',
+ magnitude_range=(0, 4),
+ prob=0.5),
+ dict(
+ type='Solarize',
+ magnitude_key='thr',
+ magnitude_range=(0, 256),
+ prob=0.5),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110),
+ thr=128,
+ prob=0.5),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Contrast',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Brightness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Sharpness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5),
+ dict(
+ type='Cutout',
+ magnitude_key='shape',
+ magnitude_range=(1, 41),
+ pad_val=0,
+ prob=0.5),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5,
+ interpolation='bicubic'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5,
+ interpolation='bicubic')
+]
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandAugment',
+ policies=policies,
+ num_policies=2,
+ magnitude_level=12),
+ dict(
+ type='RandomResizedCrop',
+ size=256,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(type='Lighting', **img_lighting_cfg),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=False),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=256,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
+
+# optimizer
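+# lr=0.8 follows the linear scaling rule for the 64 img/GPU x 32 GPU = 2048
+# total batch (0.1 x 2048/256); bias and norm params are exempt from decay.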
+optimizer = dict(
+ type='SGD',
+ lr=0.8,
+ momentum=0.9,
+ weight_decay=1e-4,
+ paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
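+# 5 epochs of linear warmup, then cosine annealing to zero over 270 epochs.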
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-6,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=270)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest200_b32x64_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest200_b32x64_imagenet.py
new file mode 100644
index 0000000000..3b166a2d62
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest200_b32x64_imagenet.py
@@ -0,0 +1,181 @@
+_base_ = ['../_base_/models/resnest200.py', '../_base_/default_runtime.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_lighting_cfg = dict(
+ eigval=[55.4625, 4.7940, 1.1475],
+ eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
+ [-0.5836, -0.6948, 0.4203]],
+ alphastd=0.1,
+ to_rgb=True)
+policies = [
+ dict(type='AutoContrast', prob=0.5),
+ dict(type='Equalize', prob=0.5),
+ dict(type='Invert', prob=0.5),
+ dict(
+ type='Rotate',
+ magnitude_key='angle',
+ magnitude_range=(0, 30),
+ pad_val=0,
+ prob=0.5,
+ random_negative_prob=0.5),
+ dict(
+ type='Posterize',
+ magnitude_key='bits',
+ magnitude_range=(0, 4),
+ prob=0.5),
+ dict(
+ type='Solarize',
+ magnitude_key='thr',
+ magnitude_range=(0, 256),
+ prob=0.5),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110),
+ thr=128,
+ prob=0.5),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Contrast',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Brightness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Sharpness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5),
+ dict(
+ type='Cutout',
+ magnitude_key='shape',
+ magnitude_range=(1, 41),
+ pad_val=0,
+ prob=0.5),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5,
+ interpolation='bicubic'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5,
+ interpolation='bicubic')
+]
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandAugment',
+ policies=policies,
+ num_policies=2,
+ magnitude_level=12),
+ dict(
+ type='RandomResizedCrop',
+ size=320,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(type='Lighting', **img_lighting_cfg),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=False),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=320,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.8,
+ momentum=0.9,
+ weight_decay=1e-4,
+ paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-6,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=270)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest269_b32x64_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest269_b32x64_imagenet.py
new file mode 100644
index 0000000000..7a4db092a4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest269_b32x64_imagenet.py
@@ -0,0 +1,181 @@
+_base_ = ['../_base_/models/resnest269.py', '../_base_/default_runtime.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_lighting_cfg = dict(
+ eigval=[55.4625, 4.7940, 1.1475],
+ eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
+ [-0.5836, -0.6948, 0.4203]],
+ alphastd=0.1,
+ to_rgb=True)
+policies = [
+ dict(type='AutoContrast', prob=0.5),
+ dict(type='Equalize', prob=0.5),
+ dict(type='Invert', prob=0.5),
+ dict(
+ type='Rotate',
+ magnitude_key='angle',
+ magnitude_range=(0, 30),
+ pad_val=0,
+ prob=0.5,
+ random_negative_prob=0.5),
+ dict(
+ type='Posterize',
+ magnitude_key='bits',
+ magnitude_range=(0, 4),
+ prob=0.5),
+ dict(
+ type='Solarize',
+ magnitude_key='thr',
+ magnitude_range=(0, 256),
+ prob=0.5),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110),
+ thr=128,
+ prob=0.5),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Contrast',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Brightness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Sharpness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5),
+ dict(
+ type='Cutout',
+ magnitude_key='shape',
+ magnitude_range=(1, 41),
+ pad_val=0,
+ prob=0.5),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5,
+ interpolation='bicubic'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5,
+ interpolation='bicubic')
+]
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandAugment',
+ policies=policies,
+ num_policies=2,
+ magnitude_level=12),
+ dict(
+ type='RandomResizedCrop',
+ size=416,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(type='Lighting', **img_lighting_cfg),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=False),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=416,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.8,
+ momentum=0.9,
+ weight_decay=1e-4,
+ paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-6,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=270)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest50_b64x32_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest50_b64x32_imagenet.py
new file mode 100644
index 0000000000..812a3bee53
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest50_b64x32_imagenet.py
@@ -0,0 +1,181 @@
+_base_ = ['../_base_/models/resnest50.py', '../_base_/default_runtime.py']
+
+# dataset settings
+dataset_type = 'ImageNet'
+img_lighting_cfg = dict(
+ eigval=[55.4625, 4.7940, 1.1475],
+ eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140],
+ [-0.5836, -0.6948, 0.4203]],
+ alphastd=0.1,
+ to_rgb=True)
+policies = [
+ dict(type='AutoContrast', prob=0.5),
+ dict(type='Equalize', prob=0.5),
+ dict(type='Invert', prob=0.5),
+ dict(
+ type='Rotate',
+ magnitude_key='angle',
+ magnitude_range=(0, 30),
+ pad_val=0,
+ prob=0.5,
+ random_negative_prob=0.5),
+ dict(
+ type='Posterize',
+ magnitude_key='bits',
+ magnitude_range=(0, 4),
+ prob=0.5),
+ dict(
+ type='Solarize',
+ magnitude_key='thr',
+ magnitude_range=(0, 256),
+ prob=0.5),
+ dict(
+ type='SolarizeAdd',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 110),
+ thr=128,
+ prob=0.5),
+ dict(
+ type='ColorTransform',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Contrast',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Brightness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Sharpness',
+ magnitude_key='magnitude',
+ magnitude_range=(-0.9, 0.9),
+ prob=0.5,
+ random_negative_prob=0.),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5),
+ dict(
+ type='Shear',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5),
+ dict(
+ type='Cutout',
+ magnitude_key='shape',
+ magnitude_range=(1, 41),
+ pad_val=0,
+ prob=0.5),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='horizontal',
+ random_negative_prob=0.5,
+ interpolation='bicubic'),
+ dict(
+ type='Translate',
+ magnitude_key='magnitude',
+ magnitude_range=(0, 0.3),
+ pad_val=0,
+ prob=0.5,
+ direction='vertical',
+ random_negative_prob=0.5,
+ interpolation='bicubic')
+]
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='RandAugment',
+ policies=policies,
+ num_policies=2,
+ magnitude_level=12),
+ dict(
+ type='RandomResizedCrop',
+ size=224,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
+ dict(type='Lighting', **img_lighting_cfg),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=False),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='CenterCrop',
+ crop_size=224,
+ efficientnet_style=True,
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(
+ type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+data = dict(
+ samples_per_gpu=64,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ # replace `data/val` with `data/test` for standard test
+ type=dataset_type,
+ data_prefix='data/imagenet/val',
+ ann_file='data/imagenet/meta/val.txt',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='accuracy')
+
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.8,
+ momentum=0.9,
+ weight_decay=1e-4,
+ paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-6,
+ warmup_by_epoch=True)
+runner = dict(type='EpochBasedRunner', max_epochs=270)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/README.md
new file mode 100644
index 0000000000..8e30bcb46b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/README.md
@@ -0,0 +1,47 @@
+# Deep Residual Learning for Image Recognition
+
+
+## Introduction
+
+
+
+```latex
+@inproceedings{he2016deep,
+ title={Deep residual learning for image recognition},
+ author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={770--778},
+ year={2016}
+}
+```
+
+## Results and models
+
+### Cifar10
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:|
+| ResNet-18-b16x8 | 11.17 | 0.56 | 94.82 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.log.json) |
+| ResNet-34-b16x8 | 21.28 | 1.16 | 95.34 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.log.json) |
+| ResNet-50-b16x8 | 23.52 | 1.31 | 95.55 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.log.json) |
+| ResNet-101-b16x8 | 42.51 | 2.52 | 95.58 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.log.json) |
+| ResNet-152-b16x8 | 58.16 | 3.74 | 95.76 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.log.json) |
+
+### Cifar100
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:|
+| ResNet-50-b16x8 | 23.71 | 1.31 | 79.90 | 95.19 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b16x8_cifar100.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.log.json) |
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:|
+| ResNet-18 | 11.69 | 1.82 | 69.90 | 89.43 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.log.json) |
+| ResNet-34 | 21.8 | 3.68 | 73.62 | 91.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.log.json) |
+| ResNet-50 | 25.56 | 4.12 | 76.55 | 93.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.log.json) |
+| ResNet-101 | 44.55 | 7.85 | 77.97 | 94.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.log.json) |
+| ResNet-152 | 60.19 | 11.58 | 78.48 | 94.13 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.log.json) |
+| ResNetV1D-50 | 25.58 | 4.36 | 77.54 | 93.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d50_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.log.json) |
+| ResNetV1D-101 | 44.57 | 8.09 | 78.93 | 94.48 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d101_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.log.json) |
+| ResNetV1D-152 | 60.21 | 11.82 | 79.41 | 94.70 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d152_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.log.json) |
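+
+The released checkpoints can be sanity-checked with mmcls' high-level
+inference API. A minimal sketch, assuming mmcls is installed, the ResNet-50
+weights above have been downloaded locally, and `demo.jpg` stands in for any
+test image:
+
+```python
+# Load a config/checkpoint pair from the table and classify a single image.
+from mmcls.apis import inference_model, init_model
+
+config = 'configs/resnet/resnet50_b32x8_imagenet.py'
+checkpoint = 'resnet50_8xb32_in1k_20210831-ea4938fc.pth'  # local download
+
+model = init_model(config, checkpoint, device='cpu')  # or 'cuda:0'
+result = inference_model(model, 'demo.jpg')  # placeholder image path
+print(result['pred_class'], result['pred_score'])
+```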
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/metafile.yml
new file mode 100644
index 0000000000..8353014dbd
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/metafile.yml
@@ -0,0 +1,217 @@
+Collections:
+ - Name: ResNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 100
+ Batch Size: 256
+ Architecture:
+ - ResNet
+ Paper:
+ URL: https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html
+ Title: "Deep Residual Learning for Image Recognition"
+ README: configs/resnet/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnet.py#L383
+ Version: v0.15.0
+ - Name: ResNet-CIFAR
+ Metadata:
+ Training Data: CIFAR-10
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x 1080 GPUs
+ Epochs: 200
+ Batch Size: 128
+ Architecture:
+ - ResNet
+ Paper:
+ URL: https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html
+ Title: "Deep Residual Learning for Image Recognition"
+ README: configs/resnet/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnet_cifar.py#L10
+ Version: v0.15.0
+
+Models:
+ - Name: resnet18_b16x8_cifar10
+ Metadata:
+ FLOPs: 560000000
+ Parameters: 11170000
+ In Collection: ResNet-CIFAR
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 94.82
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth
+ Config: configs/resnet/resnet18_b16x8_cifar10.py
+ - Name: resnet34_b16x8_cifar10
+ Metadata:
+ FLOPs: 1160000000
+ Parameters: 21280000
+ In Collection: ResNet-CIFAR
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 95.34
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth
+ Config: configs/resnet/resnet34_b16x8_cifar10.py
+ - Name: resnet50_b16x8_cifar10
+ Metadata:
+ FLOPs: 1310000000
+ Parameters: 23520000
+ In Collection: ResNet-CIFAR
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 95.55
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth
+ Config: configs/resnet/resnet50_b16x8_cifar10.py
+ - Name: resnet101_b16x8_cifar10
+ Metadata:
+ FLOPs: 2520000000
+ Parameters: 42510000
+ In Collection: ResNet-CIFAR
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 95.58
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth
+ Config: configs/resnet/resnet101_b16x8_cifar10.py
+ - Name: resnet152_b16x8_cifar10
+ Metadata:
+ FLOPs: 3740000000
+ Parameters: 58160000
+ In Collection: ResNet-CIFAR
+ Results:
+ - Dataset: CIFAR-10
+ Metrics:
+ Top 1 Accuracy: 95.76
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth
+ Config: configs/resnet/resnet152_b16x8_cifar10.py
+ - Name: resnet50_b16x8_cifar100
+ Metadata:
+ FLOPs: 1310000000
+ Parameters: 23710000
+ Training Data: CIFAR-100
+ In Collection: ResNet-CIFAR
+ Results:
+ - Dataset: CIFAR-100
+ Metrics:
+ Top 1 Accuracy: 79.90
+ Top 5 Accuracy: 95.19
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth
+ Config: configs/resnet/resnet50_b16x8_cifar100.py
+ - Name: resnet18_b32x8_imagenet
+ Metadata:
+ FLOPs: 1820000000
+ Parameters: 11690000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 69.90
+ Top 5 Accuracy: 89.43
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+ Config: configs/resnet/resnet18_b32x8_imagenet.py
+ - Name: resnet34_b32x8_imagenet
+ Metadata:
+ FLOPs: 3680000000
+    Parameters: 21800000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 73.62
+ Top 5 Accuracy: 91.59
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth
+ Config: configs/resnet/resnet34_b32x8_imagenet.py
+ - Name: resnet50_b32x8_imagenet
+ Metadata:
+ FLOPs: 4120000000
+ Parameters: 25560000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 76.55
+ Top 5 Accuracy: 93.06
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth
+ Config: configs/resnet/resnet50_b32x8_imagenet.py
+ - Name: resnet101_b32x8_imagenet
+ Metadata:
+ FLOPs: 7850000000
+ Parameters: 44550000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.97
+ Top 5 Accuracy: 94.06
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth
+ Config: configs/resnet/resnet101_b32x8_imagenet.py
+ - Name: resnet152_b32x8_imagenet
+ Metadata:
+ FLOPs: 11580000000
+ Parameters: 60190000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.48
+ Top 5 Accuracy: 94.13
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth
+ Config: configs/resnet/resnet152_b32x8_imagenet.py
+ - Name: resnetv1d50_b32x8_imagenet
+ Metadata:
+ FLOPs: 4360000000
+ Parameters: 25580000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.54
+ Top 5 Accuracy: 93.57
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth
+ Config: configs/resnet/resnetv1d50_b32x8_imagenet.py
+ - Name: resnetv1d101_b32x8_imagenet
+ Metadata:
+ FLOPs: 8090000000
+ Parameters: 44570000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.93
+ Top 5 Accuracy: 94.48
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth
+ Config: configs/resnet/resnetv1d101_b32x8_imagenet.py
+ - Name: resnetv1d152_b32x8_imagenet
+ Metadata:
+ FLOPs: 11820000000
+ Parameters: 60210000
+ In Collection: ResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.41
+ Top 5 Accuracy: 94.70
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth
+ Config: configs/resnet/resnetv1d152_b32x8_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b16x8_cifar10.py
new file mode 100644
index 0000000000..166a1740b0
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b16x8_cifar10.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnet101_cifar.py',
+ '../_base_/datasets/cifar10_bs16.py',
+ '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b32x8_imagenet.py
new file mode 100644
index 0000000000..388d2cd918
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b32x8_imagenet.py
@@ -0,0 +1,4 @@
+_base_ = [
+ '../_base_/models/resnet101.py', '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b16x8_cifar10.py
new file mode 100644
index 0000000000..3f307b6aa8
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b16x8_cifar10.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnet152_cifar.py',
+ '../_base_/datasets/cifar10_bs16.py',
+ '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b32x8_imagenet.py
new file mode 100644
index 0000000000..cc9dc2cee4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b32x8_imagenet.py
@@ -0,0 +1,4 @@
+_base_ = [
+ '../_base_/models/resnet152.py', '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b16x8_cifar10.py
new file mode 100644
index 0000000000..c7afa397b7
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b16x8_cifar10.py
@@ -0,0 +1,4 @@
+_base_ = [
+ '../_base_/models/resnet18_cifar.py', '../_base_/datasets/cifar10_bs16.py',
+ '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b32x8_imagenet.py
new file mode 100644
index 0000000000..ac452ff756
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b32x8_imagenet.py
@@ -0,0 +1,4 @@
+_base_ = [
+ '../_base_/models/resnet18.py', '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b16x8_cifar10.py
new file mode 100644
index 0000000000..7f5cd517d5
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b16x8_cifar10.py
@@ -0,0 +1,4 @@
+_base_ = [
+ '../_base_/models/resnet34_cifar.py', '../_base_/datasets/cifar10_bs16.py',
+ '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b32x8_imagenet.py
new file mode 100644
index 0000000000..7749261c80
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b32x8_imagenet.py
@@ -0,0 +1,4 @@
+_base_ = [
+ '../_base_/models/resnet34.py', '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py
new file mode 100644
index 0000000000..8cc79211e9
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py
@@ -0,0 +1,11 @@
+_base_ = [
+ '../_base_/models/resnet50.py', '../_base_/datasets/imagenet21k_bs128.py',
+ '../_base_/schedules/imagenet_bs1024_coslr.py',
+ '../_base_/default_runtime.py'
+]
+
+# model settings
+model = dict(head=dict(num_classes=21843))
+
+# runtime settings
+runner = dict(type='EpochBasedRunner', max_epochs=90)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10.py
new file mode 100644
index 0000000000..669e5de27e
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10.py
@@ -0,0 +1,4 @@
+_base_ = [
+ '../_base_/models/resnet50_cifar.py', '../_base_/datasets/cifar10_bs16.py',
+ '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar100.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar100.py
new file mode 100644
index 0000000000..39bd90f794
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar100.py
@@ -0,0 +1,10 @@
+_base_ = [
+ '../_base_/models/resnet50_cifar.py',
+ '../_base_/datasets/cifar100_bs16.py',
+ '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
+]
+
+model = dict(head=dict(num_classes=100))
+
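+# CIFAR-100 overrides of the inherited CIFAR-10 recipe: weight decay 5e-4 and
+# step decay (gamma=0.2) at epochs 60/120/160.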
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005)
+lr_config = dict(policy='step', step=[60, 120, 160], gamma=0.2)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10_mixup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10_mixup.py
new file mode 100644
index 0000000000..2420ebfeb0
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10_mixup.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnet50_cifar_mixup.py',
+ '../_base_/datasets/cifar10_bs16.py',
+ '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_coslr_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_coslr_imagenet.py
new file mode 100644
index 0000000000..938a114b79
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_coslr_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256_coslr.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_cutmix_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_cutmix_imagenet.py
new file mode 100644
index 0000000000..2f8d0ca9f3
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_cutmix_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnet50_cutmix.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_imagenet.py
new file mode 100644
index 0000000000..c32f333b67
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_imagenet.py
@@ -0,0 +1,4 @@
+_base_ = [
+ '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
new file mode 100644
index 0000000000..1c1aa5a2c4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnet50_label_smooth.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_mixup_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_mixup_imagenet.py
new file mode 100644
index 0000000000..2a153d0e18
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_mixup_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnet50_mixup.py',
+ '../_base_/datasets/imagenet_bs32.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py
new file mode 100644
index 0000000000..c26245ef53
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py',
+ '../_base_/schedules/imagenet_bs2048_coslr.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_imagenet.py
new file mode 100644
index 0000000000..34d5288b9d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_imagenet.py
@@ -0,0 +1,4 @@
+_base_ = [
+ '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py',
+ '../_base_/schedules/imagenet_bs2048.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
new file mode 100644
index 0000000000..23c9defdde
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
@@ -0,0 +1,12 @@
+_base_ = ['./resnet50_b64x32_warmup_imagenet.py']
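+# Identical to the base config except for the head, which swaps plain
+# cross-entropy for label-smoothed cross-entropy (label_smooth_val=0.1).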
+model = dict(
+ head=dict(
+ type='LinearClsHead',
+ num_classes=1000,
+ in_channels=2048,
+ loss=dict(
+ type='LabelSmoothLoss',
+ loss_weight=1.0,
+ label_smooth_val=0.1,
+ num_classes=1000),
+ ))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d101_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d101_b32x8_imagenet.py
new file mode 100644
index 0000000000..b16ca863db
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d101_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnetv1d101.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d152_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d152_b32x8_imagenet.py
new file mode 100644
index 0000000000..76926ddbb6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d152_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnetv1d152.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d50_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d50_b32x8_imagenet.py
new file mode 100644
index 0000000000..208bde470a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d50_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnetv1d50.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/README.md
new file mode 100644
index 0000000000..8a4786aac9
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/README.md
@@ -0,0 +1,27 @@
+# Aggregated Residual Transformations for Deep Neural Networks
+
+
+## Introduction
+
+
+
+```latex
+@inproceedings{xie2017aggregated,
+ title={Aggregated residual transformations for deep neural networks},
+ author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={1492--1500},
+ year={2017}
+}
+```
+
+## Results and models
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:|
+| ResNeXt-32x4d-50 | 25.03 | 4.27 | 77.90 | 93.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext50_32x4d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.log.json) |
+| ResNeXt-32x4d-101 | 44.18 | 8.03 | 78.61 | 94.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101_32x4d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.log.json) |
+| ResNeXt-32x8d-101 | 88.79 | 16.5 | 79.27 | 94.58 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101_32x8d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.log.json) |
+| ResNeXt-32x4d-152 | 59.95 | 11.8 | 78.88 | 94.33 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext152_32x4d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.log.json) |
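+
+The Params(M) column can be cross-checked by instantiating a model straight
+from its config. A minimal sketch, assuming mmcv and mmcls are installed and
+the snippet runs from the repository root:
+
+```python
+# Build ResNeXt-50 (32x4d) from its config and count its parameters.
+from mmcv import Config
+from mmcls.models import build_classifier
+
+cfg = Config.fromfile('configs/resnext/resnext50_32x4d_b32x8_imagenet.py')
+model = build_classifier(cfg.model)
+n_params = sum(p.numel() for p in model.parameters())
+print(f'{n_params / 1e6:.2f} M parameters')  # expected close to 25.03
+```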
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/metafile.yml
new file mode 100644
index 0000000000..841bad4ca1
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/metafile.yml
@@ -0,0 +1,73 @@
+Collections:
+ - Name: ResNeXt
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 100
+ Batch Size: 256
+ Architecture:
+ - ResNeXt
+ Paper:
+ URL: https://openaccess.thecvf.com/content_cvpr_2017/html/Xie_Aggregated_Residual_Transformations_CVPR_2017_paper.html
+ Title: "Aggregated Residual Transformations for Deep Neural Networks"
+ README: configs/resnext/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnext.py#L90
+ Version: v0.15.0
+
+Models:
+ - Name: resnext50_32x4d_b32x8_imagenet
+ Metadata:
+ FLOPs: 4270000000
+ Parameters: 25030000
+ In Collection: ResNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.90
+ Top 5 Accuracy: 93.66
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth
+ Config: configs/resnext/resnext50_32x4d_b32x8_imagenet.py
+ - Name: resnext101_32x4d_b32x8_imagenet
+ Metadata:
+ FLOPs: 8030000000
+ Parameters: 44180000
+ In Collection: ResNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.61
+ Top 5 Accuracy: 94.17
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth
+ Config: configs/resnext/resnext101_32x4d_b32x8_imagenet.py
+ - Name: resnext101_32x8d_b32x8_imagenet
+ Metadata:
+ FLOPs: 16500000000
+ Parameters: 88790000
+ In Collection: ResNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 79.27
+ Top 5 Accuracy: 94.58
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth
+ Config: configs/resnext/resnext101_32x8d_b32x8_imagenet.py
+ - Name: resnext152_32x4d_b32x8_imagenet
+ Metadata:
+ FLOPs: 11800000000
+ Parameters: 59950000
+ In Collection: ResNeXt
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.88
+ Top 5 Accuracy: 94.33
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth
+ Config: configs/resnext/resnext152_32x4d_b32x8_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x4d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x4d_b32x8_imagenet.py
new file mode 100644
index 0000000000..970aa60f35
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x4d_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnext101_32x4d.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x8d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x8d_b32x8_imagenet.py
new file mode 100644
index 0000000000..315d05fd57
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x8d_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnext101_32x8d.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext152_32x4d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext152_32x4d_b32x8_imagenet.py
new file mode 100644
index 0000000000..9c137313cb
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext152_32x4d_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnext152_32x4d.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext50_32x4d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext50_32x4d_b32x8_imagenet.py
new file mode 100644
index 0000000000..bd9c9fcf4e
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext50_32x4d_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/resnext50_32x4d.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/README.md
new file mode 100644
index 0000000000..1241e3fc6e
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/README.md
@@ -0,0 +1,25 @@
+# Squeeze-and-Excitation Networks
+
+
+## Introduction
+
+
+
+```latex
+@inproceedings{hu2018squeeze,
+ title={Squeeze-and-excitation networks},
+ author={Hu, Jie and Shen, Li and Sun, Gang},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={7132--7141},
+ year={2018}
+}
+```
+
+## Results and models
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:|
+| SE-ResNet-50 | 28.09 | 4.13 | 77.74 | 93.84 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet50_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200708-657b3c36.log.json) |
+| SE-ResNet-101 | 49.33 | 7.86 | 78.26 | 94.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet101_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200708-038a4d04.log.json) |
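+
+The only architectural change relative to plain ResNet is the
+squeeze-and-excitation block from the citation above. A minimal PyTorch
+sketch of the idea (illustrative only; the `SELayer` shipped with mmcls may
+differ in detail):
+
+```python
+import torch
+import torch.nn as nn
+
+class SEBlock(nn.Module):
+    """Squeeze (global average pool) then excite (channel-wise gating)."""
+
+    def __init__(self, channels, reduction=16):
+        super().__init__()
+        self.pool = nn.AdaptiveAvgPool2d(1)
+        self.fc = nn.Sequential(
+            nn.Linear(channels, channels // reduction), nn.ReLU(inplace=True),
+            nn.Linear(channels // reduction, channels), nn.Sigmoid())
+
+    def forward(self, x):
+        b, c, _, _ = x.shape
+        gate = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
+        return x * gate  # reweight each channel by its learned importance
+
+print(SEBlock(64)(torch.randn(2, 64, 32, 32)).shape)  # (2, 64, 32, 32)
+```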
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/metafile.yml
new file mode 100644
index 0000000000..419425dc79
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/metafile.yml
@@ -0,0 +1,47 @@
+Collections:
+ - Name: SEResNet
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x V100 GPUs
+ Epochs: 140
+ Batch Size: 256
+ Architecture:
+ - ResNet
+ Paper:
+ URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper.html
+ Title: "Squeeze-and-Excitation Networks"
+ README: configs/seresnet/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/seresnet.py#L58
+ Version: v0.15.0
+
+Models:
+ - Name: seresnet50_b32x8_imagenet
+ Metadata:
+ FLOPs: 4130000000
+ Parameters: 28090000
+ In Collection: SEResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 77.74
+ Top 5 Accuracy: 93.84
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth
+ Config: configs/seresnet/seresnet50_b32x8_imagenet.py
+ - Name: seresnet101_b32x8_imagenet
+ Metadata:
+ FLOPs: 7860000000
+ Parameters: 49330000
+ In Collection: SEResNet
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 78.26
+ Top 5 Accuracy: 94.07
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth
+ Config: configs/seresnet/seresnet101_b32x8_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet101_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet101_b32x8_imagenet.py
new file mode 100644
index 0000000000..8be39e7a32
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet101_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/seresnet101.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet50_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet50_b32x8_imagenet.py
new file mode 100644
index 0000000000..19082bd0dd
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet50_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/seresnet50.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256_140e.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/README.md
new file mode 100644
index 0000000000..393cc5183d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/README.md
@@ -0,0 +1,16 @@
+# Squeeze-and-Excitation Networks
+
+
+## Introduction
+
+
+
+```latex
+@inproceedings{hu2018squeeze,
+ title={Squeeze-and-excitation networks},
+ author={Hu, Jie and Shen, Li and Sun, Gang},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={7132--7141},
+ year={2018}
+}
+```
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py
new file mode 100644
index 0000000000..01778305ca
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/seresnext101_32x4d.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py
new file mode 100644
index 0000000000..4d593e45b8
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/seresnext50_32x4d.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/README.md
new file mode 100644
index 0000000000..b18934565b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/README.md
@@ -0,0 +1,24 @@
+# ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
+
+
+## Introduction
+
+
+
+```latex
+@inproceedings{zhang2018shufflenet,
+ title={Shufflenet: An extremely efficient convolutional neural network for mobile devices},
+ author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian},
+ booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
+ pages={6848--6856},
+ year={2018}
+}
+```
+
+## Results and models
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:|
+| ShuffleNetV1 1.0x (group=3) | 1.87 | 0.146 | 68.13 | 87.81 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth) | [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.log.json) |
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/metafile.yml
new file mode 100644
index 0000000000..04e7e46484
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/metafile.yml
@@ -0,0 +1,35 @@
+Collections:
+ - Name: Shufflenet V1
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ - No BN decay
+ Training Resources: 8x 1080 GPUs
+ Epochs: 300
+ Batch Size: 1024
+ Architecture:
+ - Shufflenet V1
+ Paper:
+ URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Zhang_ShuffleNet_An_Extremely_CVPR_2018_paper.html
+ Title: "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices"
+ README: configs/shufflenet_v1/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v1.py#L152
+ Version: v0.15.0
+
+Models:
+ - Name: shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet
+ Metadata:
+ FLOPs: 146000000
+ Parameters: 1870000
+ In Collection: Shufflenet V1
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 68.13
+ Top 5 Accuracy: 87.81
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth
+ Config: configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py
new file mode 100644
index 0000000000..58e45f1ba4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/shufflenet_v1_1x.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize.py',
+ '../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/README.md
new file mode 100644
index 0000000000..3502425819
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/README.md
@@ -0,0 +1,24 @@
+# ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
+
+
+## Introduction
+
+
+
+```latex
+@inproceedings{ma2018shufflenet,
+ title={Shufflenet v2: Practical guidelines for efficient cnn architecture design},
+ author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian},
+ booktitle={Proceedings of the European conference on computer vision (ECCV)},
+ pages={116--131},
+ year={2018}
+}
+```
+
+## Results and models
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:|
+| ShuffleNetV2 1.0x | 2.28 | 0.149 | 69.55 | 88.92 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth) | [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200804-8860eec9.log.json) |
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/metafile.yml
new file mode 100644
index 0000000000..a1aa95daaa
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/metafile.yml
@@ -0,0 +1,35 @@
+Collections:
+ - Name: Shufflenet V2
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ - No BN decay
+ Training Resources: 8x 1080 GPUs
+ Epochs: 300
+ Batch Size: 1024
+ Architecture:
+ - Shufflenet V2
+ Paper:
+ URL: https://openaccess.thecvf.com/content_ECCV_2018/papers/Ningning_Light-weight_CNN_Architecture_ECCV_2018_paper.pdf
+ Title: "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
+ README: configs/shufflenet_v2/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v2.py#L134
+ Version: v0.15.0
+
+Models:
+ - Name: shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet
+ Metadata:
+ FLOPs: 149000000
+ Parameters: 2280000
+ In Collection: Shufflenet V2
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 69.55
+ Top 5 Accuracy: 88.92
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth
+ Config: configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py
new file mode 100644
index 0000000000..a106ab8686
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/shufflenet_v2_1x.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize.py',
+ '../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/README.md
new file mode 100644
index 0000000000..b1fade80dd
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/README.md
@@ -0,0 +1,57 @@
+# Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
+
+
+## Introduction
+
+[ALGORITHM]
+
+```latex
+@article{liu2021Swin,
+ title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows},
+ author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},
+ journal={arXiv preprint arXiv:2103.14030},
+ year={2021}
+}
+```
+
+## Pre-trained models
+
+The pre-trained models are converted from the [model zoo of Swin Transformer](https://github.com/microsoft/Swin-Transformer#main-results-on-imagenet-with-pretrained-models).
+
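+Once converted, a checkpoint can be used for inference directly. Below is a
+minimal sketch using the mmcls Python API; the local file paths are
+placeholders:
+
+```python
+from mmcls.apis import inference_model, init_model
+
+config = 'configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py'
+checkpoint = 'swin_tiny_patch4_window7_224-160bb0a5.pth'  # converted weights
+model = init_model(config, checkpoint, device='cpu')
+
+result = inference_model(model, 'demo.jpg')  # any test image
+print(result['pred_class'], result['pred_score'])
+```
+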
+### ImageNet-1k
+
+| Model | Pretrain | Resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download |
+|:---------:|:------------:|:-----------:|:---------:|:---------:|:---------:|:---------:|:--------:|
+| Swin-T | ImageNet-1k | 224x224 | 28.29 | 4.36 | 81.18 | 95.52 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_tiny_patch4_window7_224-160bb0a5.pth)|
+| Swin-S | ImageNet-1k | 224x224 | 49.61 | 8.52 | 83.21 | 96.25 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth)|
+| Swin-B | ImageNet-1k | 224x224 | 87.77 | 15.14 | 83.42 | 96.44 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth)|
+| Swin-B | ImageNet-1k | 384x384 | 87.90 | 44.49 | 84.49 | 96.95 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth)|
+| Swin-B | ImageNet-22k | 224x224 | 87.77 | 15.14 | 85.16 | 97.50 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth)|
+| Swin-B | ImageNet-22k | 384x384 | 87.90 | 44.49 | 86.44 | 98.05 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth)|
+| Swin-L | ImageNet-22k | 224x224 | 196.53 | 34.04 | 86.24 | 97.88 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth)|
+| Swin-L | ImageNet-22k | 384x384 | 196.74 | 100.04 | 87.25 | 98.25 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth)|
+
+
+## Results and models
+
+### ImageNet
+| Model | Pretrain | Resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------:|:------------:|:-----------:|:---------:|:---------:|:---------:|:---------:|:----------:|:--------:|
+| Swin-T | ImageNet-1k | 224x224 | 28.29 | 4.36 | 81.18 | 95.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925.log.json)|
+| Swin-S | ImageNet-1k | 224x224 | 49.61 | 8.52 | 83.02 | 96.29 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219.log.json)|
+| Swin-B | ImageNet-1k | 224x224 | 87.77 | 15.14 | 83.36 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742.log.json)|
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/metafile.yml
new file mode 100644
index 0000000000..46ea185da2
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/metafile.yml
@@ -0,0 +1,188 @@
+Collections:
+ - Name: Swin-Transformer
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - AdamW
+ - Weight Decay
+ Training Resources: 16x V100 GPUs
+ Epochs: 300
+ Batch Size: 1024
+ Architecture:
+      - Shifted Window Multi-head Self-Attention
+ Paper:
+ URL: https://arxiv.org/pdf/2103.14030.pdf
+ Title: "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows"
+ README: configs/swin_transformer/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/swin_transformer.py#L176
+ Version: v0.15.0
+
+Models:
+ - Name: swin-tiny_64xb16_in1k
+ Metadata:
+ FLOPs: 4360000000
+ Parameters: 28290000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.18
+ Top 5 Accuracy: 95.61
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth
+ Config: configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py
+ - Name: swin-small_64xb16_in1k
+ Metadata:
+ FLOPs: 8520000000
+ Parameters: 49610000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.02
+ Top 5 Accuracy: 96.29
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth
+ Config: configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py
+ - Name: swin-base_64xb16_in1k
+ Metadata:
+ FLOPs: 15140000000
+ Parameters: 87770000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.36
+ Top 5 Accuracy: 96.44
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth
+ Config: configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py
+ - Name: swin-tiny_3rdparty_in1k
+ Metadata:
+ FLOPs: 4360000000
+ Parameters: 28290000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.18
+ Top 5 Accuracy: 95.52
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_tiny_patch4_window7_224-160bb0a5.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py
+ - Name: swin-small_3rdparty_in1k
+ Metadata:
+ FLOPs: 8520000000
+ Parameters: 49610000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.21
+ Top 5 Accuracy: 96.25
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py
+ - Name: swin-base_3rdparty_in1k
+ Metadata:
+ FLOPs: 15140000000
+ Parameters: 87770000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 83.42
+ Top 5 Accuracy: 96.44
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py
+ - Name: swin-base_3rdparty_in1k-384
+ Metadata:
+ FLOPs: 44490000000
+ Parameters: 87900000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 84.49
+ Top 5 Accuracy: 96.95
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin_base_384_evalonly_imagenet.py
+ - Name: swin-base_in21k-pre-3rdparty_in1k
+ Metadata:
+ FLOPs: 15140000000
+ Parameters: 87770000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 85.16
+ Top 5 Accuracy: 97.50
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py
+ - Name: swin-base_in21k-pre-3rdparty_in1k-384
+ Metadata:
+ FLOPs: 44490000000
+ Parameters: 87900000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 86.44
+ Top 5 Accuracy: 98.05
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin_base_384_evalonly_imagenet.py
+ - Name: swin-large_in21k-pre-3rdparty_in1k
+ Metadata:
+ FLOPs: 34040000000
+ Parameters: 196530000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 86.24
+ Top 5 Accuracy: 97.88
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin_large_224_evalonly_imagenet.py
+ - Name: swin-large_in21k-pre-3rdparty_in1k-384
+ Metadata:
+ FLOPs: 100040000000
+ Parameters: 196740000
+ In Collection: Swin-Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 87.25
+ Top 5 Accuracy: 98.25
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth
+ Converted From:
+ Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth
+ Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458
+ Config: configs/swin_transformer/swin_large_384_evalonly_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py
new file mode 100644
index 0000000000..2a4548af0b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/swin_transformer/base_224.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_384_evalonly_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_384_evalonly_imagenet.py
new file mode 100644
index 0000000000..711a0d6d21
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_384_evalonly_imagenet.py
@@ -0,0 +1,7 @@
+# Only for evaluation
+_base_ = [
+ '../_base_/models/swin_transformer/base_384.py',
+ '../_base_/datasets/imagenet_bs64_swin_384.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_224_evalonly_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_224_evalonly_imagenet.py
new file mode 100644
index 0000000000..4e875c59f3
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_224_evalonly_imagenet.py
@@ -0,0 +1,7 @@
+# Only for evaluation
+_base_ = [
+ '../_base_/models/swin_transformer/large_224.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_384_evalonly_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_384_evalonly_imagenet.py
new file mode 100644
index 0000000000..a7f0ad2762
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_384_evalonly_imagenet.py
@@ -0,0 +1,7 @@
+# Only for evaluation
+_base_ = [
+ '../_base_/models/swin_transformer/large_384.py',
+ '../_base_/datasets/imagenet_bs64_swin_384.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py
new file mode 100644
index 0000000000..aa1fa21b05
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/swin_transformer/small_224.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py
new file mode 100644
index 0000000000..e1ed022a1b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/swin_transformer/tiny_224.py',
+ '../_base_/datasets/imagenet_bs64_swin_224.py',
+ '../_base_/schedules/imagenet_bs1024_adamw_swin.py',
+ '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/README.md
new file mode 100644
index 0000000000..64768463a4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/README.md
@@ -0,0 +1,33 @@
+# Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet
+
+
+## Introduction
+
+
+
+```latex
+@article{yuan2021tokens,
+ title={Tokens-to-token vit: Training vision transformers from scratch on imagenet},
+ author={Yuan, Li and Chen, Yunpeng and Wang, Tao and Yu, Weihao and Shi, Yujun and Tay, Francis EH and Feng, Jiashi and Yan, Shuicheng},
+ journal={arXiv preprint arXiv:2101.11986},
+ year={2021}
+}
+```
+
+## Pre-trained models
+
+The pre-trained models are converted from the [official repo](https://github.com/yitu-opensource/T2T-ViT/tree/main#2-t2t-vit-models).
+
+### ImageNet-1k
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:--------------:|:---------:|:--------:|:---------:|:---------:|:------:|:--------:|
+| T2T-ViT_t-14\* | 21.47 | 4.34 | 81.69 | 95.85 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_3rdparty_8xb64_in1k_20210928-b7c09b62.pth) | [log]()|
+| T2T-ViT_t-19\* | 39.08 | 7.80 | 82.43 | 96.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_3rdparty_8xb64_in1k_20210928-7f1478d5.pth) | [log]()|
+| T2T-ViT_t-24\* | 64.00 | 12.69 | 82.55 | 96.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_3rdparty_8xb64_in1k_20210928-fe95a61b.pth) | [log]()|
+
+*Models with \* are converted from other repos.*
+
+## Results and models
+
+Waiting to be added.
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/metafile.yml
new file mode 100644
index 0000000000..0abcfe0617
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/metafile.yml
@@ -0,0 +1,64 @@
+Collections:
+ - Name: Tokens-to-Token ViT
+ Metadata:
+ Training Data: ImageNet-1k
+ Architecture:
+ - Layer Normalization
+ - Scaled Dot-Product Attention
+ - Attention Dropout
+ - Dropout
+ - Tokens to Token
+ Paper:
+ URL: https://arxiv.org/abs/2101.11986
+ Title: "Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet"
+ README: configs/t2t_vit/README.md
+
+Models:
+ - Name: t2t-vit-t-14_3rdparty_8xb64_in1k
+ Metadata:
+ FLOPs: 4340000000
+ Parameters: 21470000
+ In Collection: Tokens-to-Token ViT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.69
+ Top 5 Accuracy: 95.85
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_3rdparty_8xb64_in1k_20210928-b7c09b62.pth
+ Converted From:
+ Weights: https://github.com/yitu-opensource/T2T-ViT/releases/download/main/81.7_T2T_ViTt_14.pth.tar
+ Code: https://github.com/yitu-opensource/T2T-ViT/blob/main/models/t2t_vit.py#L243
+ Config: configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py
+ - Name: t2t-vit-t-19_3rdparty_8xb64_in1k
+ Metadata:
+ FLOPs: 7800000000
+ Parameters: 39080000
+ In Collection: Tokens-to-Token ViT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.43
+ Top 5 Accuracy: 96.08
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_3rdparty_8xb64_in1k_20210928-7f1478d5.pth
+ Converted From:
+ Weights: https://github.com/yitu-opensource/T2T-ViT/releases/download/main/82.4_T2T_ViTt_19.pth.tar
+ Code: https://github.com/yitu-opensource/T2T-ViT/blob/main/models/t2t_vit.py#L254
+ Config: configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py
+ - Name: t2t-vit-t-24_3rdparty_8xb64_in1k
+ Metadata:
+ FLOPs: 12690000000
+ Parameters: 64000000
+ In Collection: Tokens-to-Token ViT
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 82.55
+ Top 5 Accuracy: 96.06
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_3rdparty_8xb64_in1k_20210928-fe95a61b.pth
+ Converted From:
+ Weights: https://github.com/yitu-opensource/T2T-ViT/releases/download/main/82.6_T2T_ViTt_24.pth.tar
+ Code: https://github.com/yitu-opensource/T2T-ViT/blob/main/models/t2t_vit.py#L265
+ Config: configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py
new file mode 100644
index 0000000000..126d564ed2
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py
@@ -0,0 +1,35 @@
+_base_ = [
+ '../_base_/models/t2t-vit-t-14.py',
+ '../_base_/datasets/imagenet_bs64_t2t_224.py',
+ '../_base_/default_runtime.py',
+]
+
+# optimizer
+paramwise_cfg = dict(
+ bias_decay_mult=0.0,
+ custom_keys={'.backbone.cls_token': dict(decay_mult=0.0)},
+)
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4,
+ weight_decay=0.05,
+ paramwise_cfg=paramwise_cfg,
+)
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+# FIXME: the LR should follow CosineAnnealing for the first 300 epochs and
+# then stay at min_lr for the last 10 epochs.
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=1e-5,
+ by_epoch=True,
+ warmup_by_epoch=True,
+ warmup='linear',
+ warmup_iters=10,
+ warmup_ratio=1e-6)
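+# For reference: after the 10-epoch linear warmup (starting at lr * 1e-6),
+# cosine annealing roughly follows
+#   lr(t) = min_lr + (lr - min_lr) * (1 + cos(pi * t / T)) / 2
+# over the remaining T epochs.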
+runner = dict(type='EpochBasedRunner', max_epochs=310)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py
new file mode 100644
index 0000000000..afd05a76a4
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py
@@ -0,0 +1,31 @@
+_base_ = [
+ '../_base_/models/t2t-vit-t-19.py',
+ '../_base_/datasets/imagenet_bs64_t2t_224.py',
+ '../_base_/default_runtime.py',
+]
+
+# optimizer
+paramwise_cfg = dict(
+ bias_decay_mult=0.0,
+ custom_keys={'.backbone.cls_token': dict(decay_mult=0.0)},
+)
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4,
+ weight_decay=0.065,
+ paramwise_cfg=paramwise_cfg,
+)
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+# FIXME: the LR should follow CosineAnnealing for the first 300 epochs and
+# then stay at min_lr for the last 10 epochs.
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=1e-5,
+ by_epoch=True,
+ warmup_by_epoch=True,
+ warmup='linear',
+ warmup_iters=10,
+ warmup_ratio=1e-6)
+runner = dict(type='EpochBasedRunner', max_epochs=310)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py
new file mode 100644
index 0000000000..9f856f3e59
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py
@@ -0,0 +1,31 @@
+_base_ = [
+ '../_base_/models/t2t-vit-t-24.py',
+ '../_base_/datasets/imagenet_bs64_t2t_224.py',
+ '../_base_/default_runtime.py',
+]
+
+# optimizer
+paramwise_cfg = dict(
+ bias_decay_mult=0.0,
+ custom_keys={'.backbone.cls_token': dict(decay_mult=0.0)},
+)
+optimizer = dict(
+ type='AdamW',
+ lr=5e-4,
+ weight_decay=0.065,
+ paramwise_cfg=paramwise_cfg,
+)
+optimizer_config = dict(grad_clip=None)
+
+# learning policy
+# FIXME: the LR should follow CosineAnnealing for the first 300 epochs and
+# then stay at min_lr for the last 10 epochs.
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=1e-5,
+ by_epoch=True,
+ warmup_by_epoch=True,
+ warmup='linear',
+ warmup_iters=10,
+ warmup_ratio=1e-6)
+runner = dict(type='EpochBasedRunner', max_epochs=310)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/README.md
new file mode 100644
index 0000000000..5e4bd38c94
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/README.md
@@ -0,0 +1,32 @@
+# Transformer in Transformer
+
+## Introduction
+
+
+
+```latex
+@misc{han2021transformer,
+ title={Transformer in Transformer},
+ author={Kai Han and An Xiao and Enhua Wu and Jianyuan Guo and Chunjing Xu and Yunhe Wang},
+ year={2021},
+ eprint={2103.00112},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
+
+## Pre-trained models
+
+The pre-trained models are converted from [timm](https://github.com/rwightman/pytorch-image-models/).
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:------:|:--------:|
+| Transformer in Transformer small\* | 23.76 | 3.36 | 81.52 | 95.73 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth) | [log]()|
+
+*Models with \* are converted from other repos.*
+
+## Results and models
+
+Waiting to be added.
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/metafile.yml
new file mode 100644
index 0000000000..ff8558b3c6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/metafile.yml
@@ -0,0 +1,29 @@
+Collections:
+ - Name: Transformer in Transformer
+ Metadata:
+ Training Data: ImageNet-1k
+ Paper:
+ URL: https://arxiv.org/abs/2103.00112
+ Title: "Transformer in Transformer"
+ README: configs/tnt/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/tnt.py#L203
+ Version: v0.15.0
+
+Models:
+ - Name: tnt-small-p16_3rdparty_in1k
+ Metadata:
+ FLOPs: 3360000000
+ Parameters: 23760000
+ In Collection: Transformer in Transformer
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 81.52
+ Top 5 Accuracy: 95.73
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth
+ Config: configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py
+ Converted From:
+ Weights: https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar
+ Code: https://github.com/contrastive/pytorch-image-models/blob/809271b0f3e5d9be4e11c0c5cec1dbba8b5e2c60/timm/models/tnt.py#L144
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py
new file mode 100644
index 0000000000..e09820bf5d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py
@@ -0,0 +1,40 @@
+# accuracy_top-1: 81.52, accuracy_top-5: 95.73
+_base_ = [
+ '../_base_/models/tnt_s_patch16_224.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/default_runtime.py'
+]
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
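+    # Resize the short edge to 248; -1 keeps the aspect ratio.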
+ dict(
+ type='Resize',
+ size=(248, -1),
+ interpolation='bicubic',
+ backend='pillow'),
+ dict(type='CenterCrop', crop_size=224),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+dataset_type = 'ImageNet'
+data = dict(
+ samples_per_gpu=32, workers_per_gpu=4, test=dict(pipeline=test_pipeline))
+
+# optimizer
+optimizer = dict(type='AdamW', lr=1e-3, weight_decay=0.05)
+optimizer_config = dict(grad_clip=None)
+
+lr_config = dict(
+ policy='CosineAnnealing',
+ min_lr=0,
+ warmup_by_epoch=True,
+ warmup='linear',
+ warmup_iters=5,
+ warmup_ratio=1e-3)
+runner = dict(type='EpochBasedRunner', max_epochs=300)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/README.md
new file mode 100644
index 0000000000..a1aca53dfc
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/README.md
@@ -0,0 +1,31 @@
+# Very Deep Convolutional Networks for Large-Scale Image Recognition
+
+
+## Introduction
+
+
+
+```latex
+@article{simonyan2014very,
+ title={Very deep convolutional networks for large-scale image recognition},
+ author={Simonyan, Karen and Zisserman, Andrew},
+ journal={arXiv preprint arXiv:1409.1556},
+ year={2014}
+}
+
+```
+
+## Results and models
+
+### ImageNet
+
+| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:|
+| VGG-11 | 132.86 | 7.63 | 68.75 | 88.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.log.json) |
+| VGG-13 | 133.05 | 11.34 | 70.02 | 89.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.log.json) |
+| VGG-16 | 138.36 | 15.5 | 71.62 | 90.49 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.log.json) |
+| VGG-19 | 143.67 | 19.67 | 72.41 | 90.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.log.json)|
+| VGG-11-BN | 132.87 | 7.64 | 70.67 | 90.16 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11bn_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.log.json) |
+| VGG-13-BN | 133.05 | 11.36 | 72.12 | 90.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13bn_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.log.json) |
+| VGG-16-BN | 138.37 | 15.53 | 73.74 | 91.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.log.json) |
+| VGG-19-BN | 143.68 | 19.7 | 74.68 | 92.27 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19bn_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.log.json)|
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/metafile.yml
new file mode 100644
index 0000000000..0c94481200
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/metafile.yml
@@ -0,0 +1,125 @@
+Collections:
+ - Name: VGG
+ Metadata:
+ Training Data: ImageNet-1k
+ Training Techniques:
+ - SGD with Momentum
+ - Weight Decay
+ Training Resources: 8x Xp GPUs
+ Epochs: 100
+ Batch Size: 256
+ Architecture:
+ - VGG
+ Paper:
+ URL: https://arxiv.org/abs/1409.1556
+    Title: "Very Deep Convolutional Networks for Large-Scale Image Recognition"
+ README: configs/vgg/README.md
+ Code:
+ URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/vgg.py#L39
+ Version: v0.15.0
+
+Models:
+ - Name: vgg11_b32x8_imagenet
+ Metadata:
+ FLOPs: 7630000000
+ Parameters: 132860000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 68.75
+ Top 5 Accuracy: 88.87
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth
+ Config: configs/vgg/vgg11_b32x8_imagenet.py
+ - Name: vgg13_b32x8_imagenet
+ Metadata:
+ FLOPs: 11340000000
+ Parameters: 133050000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 70.02
+ Top 5 Accuracy: 89.46
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth
+ Config: configs/vgg/vgg13_b32x8_imagenet.py
+ - Name: vgg16_b32x8_imagenet
+ Metadata:
+ FLOPs: 15500000000
+ Parameters: 138360000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 71.62
+ Top 5 Accuracy: 90.49
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth
+ Config: configs/vgg/vgg16_b32x8_imagenet.py
+ - Name: vgg19_b32x8_imagenet
+ Metadata:
+ FLOPs: 19670000000
+ Parameters: 143670000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 72.41
+ Top 5 Accuracy: 90.8
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth
+ Config: configs/vgg/vgg19_b32x8_imagenet.py
+ - Name: vgg11bn_b32x8_imagenet
+ Metadata:
+ FLOPs: 7640000000
+ Parameters: 132870000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 70.67
+ Top 5 Accuracy: 90.16
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth
+ Config: configs/vgg/vgg11bn_b32x8_imagenet.py
+ - Name: vgg13bn_b32x8_imagenet
+ Metadata:
+ FLOPs: 11360000000
+ Parameters: 133050000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 72.12
+ Top 5 Accuracy: 90.66
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth
+ Config: configs/vgg/vgg13bn_b32x8_imagenet.py
+ - Name: vgg16bn_b32x8_imagenet
+ Metadata:
+ FLOPs: 15530000000
+ Parameters: 138370000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 73.74
+ Top 5 Accuracy: 91.66
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth
+ Config: configs/vgg/vgg16bn_b32x8_imagenet.py
+ - Name: vgg19bn_b32x8_imagenet
+ Metadata:
+ FLOPs: 19700000000
+ Parameters: 143680000
+ In Collection: VGG
+ Results:
+ - Dataset: ImageNet-1k
+ Metrics:
+ Top 1 Accuracy: 74.68
+ Top 5 Accuracy: 92.27
+ Task: Image Classification
+ Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth
+ Config: configs/vgg/vgg19bn_b32x8_imagenet.py
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11_b32x8_imagenet.py
new file mode 100644
index 0000000000..c5742bcb98
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11_b32x8_imagenet.py
@@ -0,0 +1,7 @@
+_base_ = [
+ '../_base_/models/vgg11.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py',
+ '../_base_/default_runtime.py',
+]
+optimizer = dict(lr=0.01)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11bn_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11bn_b32x8_imagenet.py
new file mode 100644
index 0000000000..4ead074bfb
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11bn_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/vgg11bn.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13_b32x8_imagenet.py
new file mode 100644
index 0000000000..50d26f3d2b
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/vgg13.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+optimizer = dict(lr=0.01)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13bn_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13bn_b32x8_imagenet.py
new file mode 100644
index 0000000000..8d22a81729
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13bn_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/vgg13bn.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b16x8_voc.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b16x8_voc.py
new file mode 100644
index 0000000000..d096959f29
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b16x8_voc.py
@@ -0,0 +1,27 @@
+_base_ = ['../_base_/datasets/voc_bs16.py', '../_base_/default_runtime.py']
+
+# Use a multi-label classification head, since VOC images carry multiple labels.
+model = dict(
+ type='ImageClassifier',
+ backbone=dict(type='VGG', depth=16, num_classes=20),
+ neck=None,
+ head=dict(
+ type='MultiLabelClsHead',
+ loss=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
+
+# Load the model pre-trained on ImageNet.
+load_from = 'https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth' # noqa
+
+# optimizer
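+# The newly initialized 20-class classifier trains with a 10x learning rate
+# via lr_mult; the ImageNet-pretrained backbone keeps the base rate.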
+optimizer = dict(
+ type='SGD',
+ lr=0.001,
+ momentum=0.9,
+ weight_decay=0,
+ paramwise_cfg=dict(custom_keys={'.backbone.classifier': dict(lr_mult=10)}))
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(policy='step', step=20, gamma=0.1)
+runner = dict(type='EpochBasedRunner', max_epochs=40)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b32x8_imagenet.py
new file mode 100644
index 0000000000..55cd9fc4ab
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/vgg16.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+optimizer = dict(lr=0.01)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16bn_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16bn_b32x8_imagenet.py
new file mode 100644
index 0000000000..60674c7144
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16bn_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/vgg16bn.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19_b32x8_imagenet.py
new file mode 100644
index 0000000000..6b033c90b6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19_b32x8_imagenet.py
@@ -0,0 +1,6 @@
+_base_ = [
+ '../_base_/models/vgg19.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
+optimizer = dict(lr=0.01)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19bn_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19bn_b32x8_imagenet.py
new file mode 100644
index 0000000000..18a1897f65
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19bn_b32x8_imagenet.py
@@ -0,0 +1,5 @@
+_base_ = [
+ '../_base_/models/vgg19bn.py',
+ '../_base_/datasets/imagenet_bs32_pil_resize.py',
+ '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/README.md
new file mode 100644
index 0000000000..c78d00d206
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/README.md
@@ -0,0 +1,67 @@
+# An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale
+
+
+## Introduction
+
+[ALGORITHM]
+
+```latex
+@inproceedings{
+ dosovitskiy2021an,
+ title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
+ author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby},
+ booktitle={International Conference on Learning Representations},
+ year={2021},
+ url={https://openreview.net/forum?id=YicbFdNTTy}
+}
+```
+
+Training a Vision Transformer is divided into two steps. The first step is
+pre-training the model on a large dataset, such as ImageNet-21k, to obtain the
+pre-trained weights. The second step is fine-tuning the model on the target
+dataset, such as ImageNet-1k, to obtain the final model. Here, we provide both
+the pre-trained models and the fine-tuned models.
+
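+In config terms, the fine-tuning step simply inherits the shared base files
+and initializes from a pre-trained checkpoint. A minimal sketch (the
+`load_from` URL is the ViT-B16 pre-trained checkpoint listed below):
+
+```python
+_base_ = [
+    '../_base_/models/vit-base-p16.py',
+    '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+    '../_base_/schedules/imagenet_bs4096_AdamW.py',
+    '../_base_/default_runtime.py'
+]
+
+# Initialize fine-tuning from the ImageNet-21k pre-trained weights.
+load_from = 'https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth'  # noqa
+```
+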
+## Pre-trained models
+
+The pre-trained models are converted from the [model zoo of Google Research](https://github.com/google-research/vision_transformer#available-vit-models).
+
+### ImageNet-21k
+
+| Model | Params(M) | Flops(G) | Download |
+|:----------:|:---------:|:---------:|:--------:|
+| ViT-B16\* | 86.86 | 33.03 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth)|
+| ViT-B32\* | 88.30 | 8.56 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p32_3rdparty_pt-64xb64_in1k-224_20210928-eee25dd4.pth)|
+| ViT-L16\* | 304.72 | 116.68 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-large-p16_3rdparty_pt-64xb64_in1k-224_20210928-0001f9a1.pth)|
+
+*Models with \* are converted from other repos.*
+
+
+## Fine-tuned models
+
+The fine-tuned models are converted from the [model zoo of Google Research](https://github.com/google-research/vision_transformer#available-vit-models).
+
+### ImageNet-1k
+| Model | Pretrain | Resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download |
+|:----------:|:------------:|:-----------:|:---------:|:---------:|:---------:|:---------:|:----------:|:--------:|
+| ViT-B16\* | ImageNet-21k | 384x384 | 86.86 | 33.03 | 85.43 | 97.77 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth)|
+| ViT-B32\* | ImageNet-21k | 384x384 | 88.30 | 8.56 | 84.01 | 97.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth)|
+| ViT-L16\* | ImageNet-21k | 384x384 | 304.72 | 116.68 | 85.63 | 97.63 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth)|
+
+*Models with \* are converted from other repos.*
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/metafile.yml
new file mode 100644
index 0000000000..a497b17e4f
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/metafile.yml
@@ -0,0 +1,76 @@
+Collections:
+ - Name: Vision Transformer
+ Metadata:
+ Architecture:
+ - Attention Dropout
+ - Convolution
+ - Dense Connections
+ - Dropout
+ - GELU
+ - Layer Normalization
+ - Multi-Head Attention
+ - Scaled Dot-Product Attention
+ - Tanh Activation
+ Paper:
+ URL: https://arxiv.org/pdf/2010.11929.pdf
+ Title: 'An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale'
+ README: configs/vision_transformer/README.md
+
+Models:
+ - Name: vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384
+ In Collection: Vision Transformer
+ Metadata:
+ FLOPs: 33030000000
+ Parameters: 86860000
+ Training Data:
+ - ImageNet-21k
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 85.43
+ Top 5 Accuracy: 97.77
+ Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth
+ Converted From:
+ Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz
+ Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208
+ Config: configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py
+ - Name: vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384
+ In Collection: Vision Transformer
+ Metadata:
+ FLOPs: 8560000000
+ Parameters: 88300000
+ Training Data:
+ - ImageNet-21k
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 84.01
+ Top 5 Accuracy: 97.08
+ Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth
+ Converted From:
+ Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz
+ Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208
+ Config: configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py
+ - Name: vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384
+ In Collection: Vision Transformer
+ Metadata:
+ FLOPs: 116680000000
+ Parameters: 304720000
+ Training Data:
+ - ImageNet-21k
+ - ImageNet-1k
+ Results:
+ - Dataset: ImageNet-1k
+ Task: Image Classification
+ Metrics:
+ Top 1 Accuracy: 85.63
+ Top 5 Accuracy: 97.63
+ Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth
+ Converted From:
+ Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_strong1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz
+ Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208
+ Config: configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py
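The metafile above is plain YAML, so its entries can be cross-checked against the README table programmatically. A minimal sketch, assuming PyYAML is installed and the path introduced by this patch is used relative to the model root:

```python
import yaml

# Path as added by this patch, relative to the model root.
with open('configs/vision_transformer/metafile.yml') as f:
    meta = yaml.safe_load(f)

for model in meta['Models']:
    metrics = model['Results'][0]['Metrics']
    print(f"{model['Name']}: "
          f"params={model['Metadata']['Parameters'] / 1e6:.2f}M "
          f"flops={model['Metadata']['FLOPs'] / 1e9:.2f}G "
          f"top1={metrics['Top 1 Accuracy']} top5={metrics['Top 5 Accuracy']}")
```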
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py
new file mode 100644
index 0000000000..cb42d0d813
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py
@@ -0,0 +1,36 @@
+_base_ = [
+ '../_base_/models/vit-base-p16.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(img_size=384))
+
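+# (x - 127.5) / 127.5 maps uint8 pixels from [0, 255] to [-1, 1], matching the original ViT preprocessing.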
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=384, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(384, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=384),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline),
+)
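This fine-tuning config pulls the model, dataset, schedule, and runtime settings from the `_base_` files and then overrides only what changes at 384x384: the backbone input size and the two pipelines. A minimal sketch of inspecting the merged result, assuming `mmcv` is installed and the working directory is the model root:

```python
from mmcv import Config

# Resolves the _base_ chain and applies the overrides defined in this file.
cfg = Config.fromfile(
    'configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py')

print(cfg.model.backbone.img_size)                   # 384, overriding the base model
print([t['type'] for t in cfg.data.train.pipeline])  # transforms from train_pipeline above
```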
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py
new file mode 100644
index 0000000000..79c323b1ef
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/vit-base-p16.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ head=dict(hidden_dim=3072),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
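The 224x224 pretraining configs differ from the fine-tuning ones mainly in `train_cfg`: every batch is passed through `BatchMixup` with `alpha=0.2` (`prob=1.`). For intuition, here is a minimal standalone sketch of batch-level mixup in PyTorch; it mirrors the idea, not the mmcls `BatchMixup` class itself:

```python
import torch
import torch.nn.functional as F

def batch_mixup(imgs, labels, alpha=0.2, num_classes=1000):
    """Blend each image with a randomly permuted partner from the same batch,
    mixing the one-hot labels with the same Beta-sampled coefficient."""
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    one_hot = F.one_hot(labels, num_classes).float()
    index = torch.randperm(imgs.size(0))
    mixed_imgs = lam * imgs + (1 - lam) * imgs[index]
    mixed_labels = lam * one_hot + (1 - lam) * one_hot[index]
    return mixed_imgs, mixed_labels

imgs = torch.randn(8, 3, 224, 224)
labels = torch.randint(0, 1000, (8,))
mixed_imgs, mixed_labels = batch_mixup(imgs, labels)
```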
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py
new file mode 100644
index 0000000000..0386fef1fd
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py
@@ -0,0 +1,36 @@
+_base_ = [
+ '../_base_/models/vit-base-p32.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(img_size=384))
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=384, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(384, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=384),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline),
+)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py
new file mode 100644
index 0000000000..a477e2119e
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/vit-base-p32.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ head=dict(hidden_dim=3072),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py
new file mode 100644
index 0000000000..5be99188bf
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py
@@ -0,0 +1,36 @@
+_base_ = [
+ '../_base_/models/vit-large-p16.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(img_size=384))
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=384, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(384, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=384),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline),
+)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py
new file mode 100644
index 0000000000..5cf7a7d30c
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/vit-large-p16.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ head=dict(hidden_dim=3072),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py
new file mode 100644
index 0000000000..60506b0241
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py
@@ -0,0 +1,37 @@
+# Refer to pytorch-image-models
+_base_ = [
+ '../_base_/models/vit-large-p32.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(backbone=dict(img_size=384))
+
+img_norm_cfg = dict(
+ mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='RandomResizedCrop', size=384, backend='pillow'),
+ dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='ToTensor', keys=['gt_label']),
+ dict(type='Collect', keys=['img', 'gt_label'])
+]
+
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='Resize', size=(384, -1), backend='pillow'),
+ dict(type='CenterCrop', crop_size=384),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+]
+
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline),
+)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py
new file mode 100644
index 0000000000..773ade874a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py
@@ -0,0 +1,12 @@
+_base_ = [
+ '../_base_/models/vit-large-p32.py',
+ '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py',
+ '../_base_/schedules/imagenet_bs4096_AdamW.py',
+ '../_base_/default_runtime.py'
+]
+
+model = dict(
+ head=dict(hidden_dim=3072),
+ train_cfg=dict(
+ augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000,
+ prob=1.)))
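All of the `pt` configs train at 224x224 while the `ft` configs run at 384x384, so the fine-tuning stage sees a larger patch grid (24x24 instead of 14x14 for the /16 models). Loading a 224-px checkpoint into a 384-px model therefore requires resizing the position-embedding grid; the sketch below shows the usual bicubic-interpolation approach. This is a generic illustration of the technique, not mmcls's exact code path:

```python
import torch
import torch.nn.functional as F

def interpolate_pos_embed(pos_embed, src_hw=(14, 14), dst_hw=(24, 24)):
    # Split off the class token; only the patch grid is resized.
    cls_token, grid = pos_embed[:, :1], pos_embed[:, 1:]
    dim = grid.shape[-1]
    grid = grid.reshape(1, src_hw[0], src_hw[1], dim).permute(0, 3, 1, 2)
    grid = F.interpolate(grid, size=dst_hw, mode='bicubic', align_corners=False)
    grid = grid.permute(0, 2, 3, 1).reshape(1, dst_hw[0] * dst_hw[1], dim)
    return torch.cat([cls_token, grid], dim=1)

pos_embed = torch.randn(1, 1 + 14 * 14, 768)   # ViT-B/16 at 224 px
print(interpolate_pos_embed(pos_embed).shape)  # torch.Size([1, 577, 768])
```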
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/demo/demo.JPEG b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/demo/demo.JPEG
new file mode 100644
index 0000000000000000000000000000000000000000..fd3a93f59385d6ff632483646e6caee300b56d09
GIT binary patch
literal 109527
(base85-encoded binary data for demo/demo.JPEG omitted)