From 8ff221a5f127ae15858285a54d0b38b197689fc9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=BB=8E=E6=9C=A8=E6=9E=97?= <762129126@qq.com>
Date: Fri, 7 Apr 2023 13:31:22 +0800
Subject: [PATCH 1/4] [ADD] add Resnet50_ID3915_for_Pytorch and
 resnext_ID3918_for_Pytorch

---
 .../Resnet50_for_PyTorch/.gitignore | 118 ++ .../.pre-commit-config.yaml | 54 + .../Resnet50_for_PyTorch/.readthedocs.yml | 9 + .../Resnet50_for_PyTorch/CITATION.cff | 9 + .../Resnet50_for_PyTorch/LICENSE | 203 +++ .../Resnet50_for_PyTorch/MANIFEST.in | 3 + .../Resnet50_for_PyTorch/README.md | 104 ++ .../Resnet50_for_PyTorch/README_zh-CN.md | 108 ++ .../configs/_base_/datasets/cifar100_bs16.py | 36 + .../configs/_base_/datasets/cifar10_bs16.py | 35 + .../_base_/datasets/imagenet21k_bs128.py | 43 + .../configs/_base_/datasets/imagenet_bs32.py | 40 + .../datasets/imagenet_bs32_pil_resize.py | 40 + .../configs/_base_/datasets/imagenet_bs64.py | 40 + .../_base_/datasets/imagenet_bs64_autoaug.py | 43 + .../datasets/imagenet_bs64_pil_resize.py | 40 + .../imagenet_bs64_pil_resize_autoaug.py | 45 + .../_base_/datasets/imagenet_bs64_swin_224.py | 71 + .../_base_/datasets/imagenet_bs64_swin_384.py | 43 + .../_base_/datasets/imagenet_bs64_t2t_224.py | 71 + .../_base_/datasets/pipelines/auto_aug.py | 96 ++ .../_base_/datasets/pipelines/rand_aug.py | 43 + .../configs/_base_/datasets/voc_bs16.py | 41 + .../configs/_base_/default_runtime.py | 16 + .../configs/_base_/models/mobilenet_v2_1x.py | 12 + .../models/mobilenet_v3_large_imagenet.py | 14 + .../_base_/models/mobilenet_v3_small_cifar.py | 13 + .../models/mobilenet_v3_small_imagenet.py | 14 + .../_base_/models/regnet/regnetx_1.6gf.py | 12 + .../_base_/models/regnet/regnetx_12gf.py | 12 + .../_base_/models/regnet/regnetx_3.2gf.py | 12 + .../_base_/models/regnet/regnetx_4.0gf.py | 12 + .../_base_/models/regnet/regnetx_400mf.py | 12 + .../_base_/models/regnet/regnetx_6.4gf.py | 12 + .../_base_/models/regnet/regnetx_8.0gf.py | 12 + .../_base_/models/regnet/regnetx_800mf.py | 12 + .../configs/_base_/models/repvgg-A0_in1k.py | 15 + .../_base_/models/repvgg-B3_lbs-mixup_in1k.py | 23 + .../_base_/models/res2net101-w26-s4.py | 18 + .../configs/_base_/models/res2net50-w14-s8.py | 18 + .../configs/_base_/models/res2net50-w26-s4.py | 18 + .../configs/_base_/models/res2net50-w26-s6.py | 18 + .../configs/_base_/models/res2net50-w26-s8.py | 18 + .../configs/_base_/models/res2net50-w48-s2.py | 18 + .../configs/_base_/models/resnest101.py | 24 + .../configs/_base_/models/resnest200.py | 24 + .../configs/_base_/models/resnest269.py | 24 + .../configs/_base_/models/resnest50.py | 23 + .../configs/_base_/models/resnet101.py | 17 + .../configs/_base_/models/resnet101_cifar.py | 16 + .../configs/_base_/models/resnet152.py | 17 + .../configs/_base_/models/resnet152_cifar.py | 16 + .../configs/_base_/models/resnet18.py | 17 + .../configs/_base_/models/resnet18_cifar.py | 16 + .../configs/_base_/models/resnet34.py | 17 + .../configs/_base_/models/resnet34_cifar.py | 16 + .../configs/_base_/models/resnet50.py | 17 + .../configs/_base_/models/resnet50_cifar.py | 16 + .../_base_/models/resnet50_cifar_cutmix.py | 18 + .../_base_/models/resnet50_cifar_mixup.py | 17 + .../configs/_base_/models/resnet50_cutmix.py | 18 + .../_base_/models/resnet50_label_smooth.py | 18 + .../configs/_base_/models/resnet50_mixup.py | 18 + .../configs/_base_/models/resnetv1d101.py | 17 + .../configs/_base_/models/resnetv1d152.py | 17 + .../configs/_base_/models/resnetv1d50.py | 17 + .../configs/_base_/models/resnext101_32x4d.py | 19 +
.../configs/_base_/models/resnext101_32x8d.py | 19 + .../configs/_base_/models/resnext152_32x4d.py | 19 + .../configs/_base_/models/resnext50_32x4d.py | 19 + .../configs/_base_/models/seresnet101.py | 17 + .../configs/_base_/models/seresnet50.py | 17 + .../_base_/models/seresnext101_32x4d.py | 20 + .../_base_/models/seresnext50_32x4d.py | 20 + .../configs/_base_/models/shufflenet_v1_1x.py | 12 + .../configs/_base_/models/shufflenet_v2_1x.py | 12 + .../models/swin_transformer/base_224.py | 22 + .../models/swin_transformer/base_384.py | 16 + .../models/swin_transformer/large_224.py | 12 + .../models/swin_transformer/large_384.py | 16 + .../models/swin_transformer/small_224.py | 23 + .../models/swin_transformer/tiny_224.py | 22 + .../configs/_base_/models/t2t-vit-t-14.py | 41 + .../configs/_base_/models/t2t-vit-t-19.py | 41 + .../configs/_base_/models/t2t-vit-t-24.py | 41 + .../_base_/models/tnt_s_patch16_224.py | 29 + .../configs/_base_/models/vgg11.py | 10 + .../configs/_base_/models/vgg11bn.py | 11 + .../configs/_base_/models/vgg13.py | 10 + .../configs/_base_/models/vgg13bn.py | 11 + .../configs/_base_/models/vgg16.py | 10 + .../configs/_base_/models/vgg16bn.py | 11 + .../configs/_base_/models/vgg19.py | 10 + .../configs/_base_/models/vgg19bn.py | 11 + .../configs/_base_/models/vit-base-p16.py | 25 + .../configs/_base_/models/vit-base-p32.py | 24 + .../configs/_base_/models/vit-large-p16.py | 24 + .../configs/_base_/models/vit-large-p32.py | 24 + .../configs/_base_/schedules/cifar10_bs128.py | 6 + .../schedules/imagenet_bs1024_adamw_swin.py | 30 + .../_base_/schedules/imagenet_bs1024_coslr.py | 12 + .../imagenet_bs1024_linearlr_bn_nowd.py | 17 + .../_base_/schedules/imagenet_bs2048.py | 12 + .../_base_/schedules/imagenet_bs2048_AdamW.py | 20 + .../_base_/schedules/imagenet_bs2048_coslr.py | 12 + .../_base_/schedules/imagenet_bs256.py | 6 + .../_base_/schedules/imagenet_bs256_140e.py | 6 + .../imagenet_bs256_200e_coslr_warmup.py | 11 + .../_base_/schedules/imagenet_bs256_coslr.py | 6 + .../schedules/imagenet_bs256_epochstep.py | 6 + .../_base_/schedules/imagenet_bs4096_AdamW.py | 18 + .../configs/fp16/README.md | 20 + .../configs/fp16/metafile.yml | 35 + .../resnet50_b32x8_fp16_dynamic_imagenet.py | 4 + .../fp16/resnet50_b32x8_fp16_imagenet.py | 4 + .../configs/lenet/README.md | 19 + .../configs/lenet/lenet5_mnist.py | 59 + .../configs/mobilenet_v2/README.md | 27 + .../configs/mobilenet_v2/metafile.yml | 34 + .../mobilenet_v2_b32x8_imagenet.py | 6 + .../configs/mobilenet_v3/README.md | 31 + .../configs/mobilenet_v3/metafile.yml | 42 + .../mobilenet_v3_large_imagenet.py | 158 +++ .../mobilenet_v3/mobilenet_v3_small_cifar.py | 8 + .../mobilenet_v3_small_imagenet.py | 158 +++ .../configs/regnet/README.md | 38 + .../regnet/regnetx_1.6gf_b32x8_imagenet.py | 51 + .../regnet/regnetx_12gf_b32x8_imagenet.py | 51 + .../regnet/regnetx_3.2gf_b32x8_imagenet.py | 51 + .../regnet/regnetx_4.0gf_b32x8_imagenet.py | 51 + .../regnet/regnetx_400mf_b32x8_imagenet.py | 51 + .../regnet/regnetx_6.4gf_b32x8_imagenet.py | 51 + .../regnet/regnetx_8.0gf_b32x8_imagenet.py | 51 + .../regnet/regnetx_800mf_b32x8_imagenet.py | 51 + .../configs/repvgg/README.md | 51 + .../repvgg-A0_deploy_4xb64-coslr-120e_in1k.py | 3 + .../repvgg-A1_deploy_4xb64-coslr-120e_in1k.py | 3 + .../repvgg-A2_deploy_4xb64-coslr-120e_in1k.py | 3 + .../repvgg-B0_deploy_4xb64-coslr-120e_in1k.py | 3 + .../repvgg-B1_deploy_4xb64-coslr-120e_in1k.py | 3 + ...epvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py | 3 + ...epvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py | 3 
+ .../repvgg-B2_deploy_4xb64-coslr-120e_in1k.py | 3 + ...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 + ...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 + ...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 + ...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 + .../configs/repvgg/metafile.yml | 208 +++ .../repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py | 8 + .../repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py | 3 + .../repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py | 3 + .../repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py | 3 + .../repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py | 3 + .../repvgg-B1g2_4xb64-coslr-120e_in1k.py | 3 + .../repvgg-B1g4_4xb64-coslr-120e_in1k.py | 3 + .../repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py | 3 + ...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 + ...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 6 + ...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 + ...4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py | 3 + .../configs/res2net/README.md | 30 + .../configs/res2net/metafile.yml | 67 + .../res2net/res2net101-w26-s4_8xb32_in1k.py | 5 + .../res2net/res2net50-w14-s8_8xb32_in1k.py | 5 + .../res2net/res2net50-w26-s8_8xb32_in1k.py | 5 + .../configs/resnest/README.md | 17 + .../resnest/resnest101_b64x32_imagenet.py | 181 +++ .../resnest/resnest200_b32x64_imagenet.py | 181 +++ .../resnest/resnest269_b32x64_imagenet.py | 181 +++ .../resnest/resnest50_b64x32_imagenet.py | 181 +++ .../configs/resnet/README.md | 47 + .../configs/resnet/metafile.yml | 217 +++ .../configs/resnet/resnet101_b16x8_cifar10.py | 5 + .../resnet/resnet101_b32x8_imagenet.py | 4 + .../configs/resnet/resnet152_b16x8_cifar10.py | 5 + .../resnet/resnet152_b32x8_imagenet.py | 4 + .../configs/resnet/resnet18_b16x8_cifar10.py | 4 + .../configs/resnet/resnet18_b32x8_imagenet.py | 4 + .../configs/resnet/resnet34_b16x8_cifar10.py | 4 + .../configs/resnet/resnet34_b32x8_imagenet.py | 4 + .../resnet/resnet50_8xb128_coslr-90e_in21k.py | 11 + .../configs/resnet/resnet50_b16x8_cifar10.py | 4 + .../configs/resnet/resnet50_b16x8_cifar100.py | 10 + .../resnet/resnet50_b16x8_cifar10_mixup.py | 5 + .../resnet/resnet50_b32x8_coslr_imagenet.py | 5 + .../resnet/resnet50_b32x8_cutmix_imagenet.py | 5 + .../configs/resnet/resnet50_b32x8_imagenet.py | 4 + .../resnet50_b32x8_label_smooth_imagenet.py | 5 + .../resnet/resnet50_b32x8_mixup_imagenet.py | 5 + .../resnet50_b64x32_warmup_coslr_imagenet.py | 5 + .../resnet/resnet50_b64x32_warmup_imagenet.py | 4 + ...t50_b64x32_warmup_label_smooth_imagenet.py | 12 + .../resnet/resnetv1d101_b32x8_imagenet.py | 5 + .../resnet/resnetv1d152_b32x8_imagenet.py | 5 + .../resnet/resnetv1d50_b32x8_imagenet.py | 5 + .../configs/resnext/README.md | 27 + .../configs/resnext/metafile.yml | 73 + .../resnext101_32x4d_b32x8_imagenet.py | 5 + .../resnext101_32x8d_b32x8_imagenet.py | 5 + .../resnext152_32x4d_b32x8_imagenet.py | 5 + .../resnext/resnext50_32x4d_b32x8_imagenet.py | 5 + .../configs/seresnet/README.md | 25 + .../configs/seresnet/metafile.yml | 47 + .../seresnet/seresnet101_b32x8_imagenet.py | 5 + .../seresnet/seresnet50_b32x8_imagenet.py | 6 + .../configs/seresnext/README.md | 16 + .../seresnext101_32x4d_b32x8_imagenet.py | 5 + .../seresnext50_32x4d_b32x8_imagenet.py | 5 + .../configs/shufflenet_v1/README.md | 24 + .../configs/shufflenet_v1/metafile.yml | 35 + ..._v1_1x_b64x16_linearlr_bn_nowd_imagenet.py | 6 + .../configs/shufflenet_v2/README.md | 24 + .../configs/shufflenet_v2/metafile.yml | 35 + ..._v2_1x_b64x16_linearlr_bn_nowd_imagenet.py | 6 + .../configs/swin_transformer/README.md | 42 + 
.../configs/swin_transformer/metafile.yml | 188 +++ .../swin_base_224_b16x64_300e_imagenet.py | 6 + .../swin_base_384_evalonly_imagenet.py | 7 + .../swin_large_224_evalonly_imagenet.py | 7 + .../swin_large_384_evalonly_imagenet.py | 7 + .../swin_small_224_b16x64_300e_imagenet.py | 6 + .../swin_tiny_224_b16x64_300e_imagenet.py | 6 + .../configs/t2t_vit/README.md | 33 + .../configs/t2t_vit/metafile.yml | 64 + .../t2t_vit/t2t-vit-t-14_8xb64_in1k.py | 31 + .../t2t_vit/t2t-vit-t-19_8xb64_in1k.py | 31 + .../t2t_vit/t2t-vit-t-24_8xb64_in1k.py | 31 + .../configs/tnt/README.md | 32 + .../configs/tnt/metafile.yml | 29 + .../tnt_s_patch16_224_evalonly_imagenet.py | 39 + .../configs/vgg/README.md | 31 + .../configs/vgg/metafile.yml | 125 ++ .../configs/vgg/vgg11_b32x8_imagenet.py | 7 + .../configs/vgg/vgg11bn_b32x8_imagenet.py | 5 + .../configs/vgg/vgg13_b32x8_imagenet.py | 6 + .../configs/vgg/vgg13bn_b32x8_imagenet.py | 5 + .../configs/vgg/vgg16_b16x8_voc.py | 25 + .../configs/vgg/vgg16_b32x8_imagenet.py | 6 + .../configs/vgg/vgg16bn_b32x8_imagenet.py | 5 + .../configs/vgg/vgg19_b32x8_imagenet.py | 6 + .../configs/vgg/vgg19bn_b32x8_imagenet.py | 5 + .../configs/vision_transformer/README.md | 51 + .../configs/vision_transformer/metafile.yml | 76 + .../vit-base-p16_ft-64xb64_in1k-384.py | 36 + .../vit-base-p16_pt-64xb64_in1k-224.py | 12 + .../vit-base-p32_ft-64xb64_in1k-384.py | 36 + .../vit-base-p32_pt-64xb64_in1k-224.py | 12 + .../vit-large-p16_ft-64xb64_in1k-384.py | 36 + .../vit-large-p16_pt-64xb64_in1k-224.py | 12 + .../vit-large-p32_ft-64xb64_in1k-384.py | 37 + .../vit-large-p32_pt-64xb64_in1k-224.py | 12 + .../Resnet50_for_PyTorch/demo/demo.JPEG | Bin 0 -> 109527 bytes .../Resnet50_for_PyTorch/demo/image_demo.py | 25 + .../Resnet50_for_PyTorch/docker/Dockerfile | 22 + .../docker/serve/Dockerfile | 49 + .../docker/serve/config.properties | 5 + .../docker/serve/entrypoint.sh | 12 + .../Resnet50_for_PyTorch/docs/Makefile | 20 + .../docs/_static/css/readthedocs.css | 16 + .../docs/_static/image/concat.JPEG | Bin 0 -> 45505 bytes .../docs/_static/image/original.JPEG | Bin 0 -> 9414 bytes .../docs/_static/image/pipeline.JPEG | Bin 0 -> 19054 bytes .../docs/_static/js/custom.js | 1 + .../Resnet50_for_PyTorch/docs/changelog.md | 403 ++++++ .../docs/community/CONTRIBUTING.md | 71 + .../Resnet50_for_PyTorch/docs/conf.py | 297 ++++ .../docs/getting_started.md | 232 +++ .../Resnet50_for_PyTorch/docs/install.md | 142 ++ .../Resnet50_for_PyTorch/docs/model_zoo.md | 75 + .../Resnet50_for_PyTorch/docs/stat.py | 107 ++ .../docs/tools/model_serving.md | 87 ++ .../docs/tools/onnx2tensorrt.md | 80 ++ .../docs/tools/pytorch2onnx.md | 204 +++ .../docs/tools/pytorch2torchscript.md | 56 + .../docs/tools/visualization.md | 81 ++ .../docs/tutorials/config.md | 403 ++++++ .../docs/tutorials/data_pipeline.md | 148 ++ .../docs/tutorials/finetune.md | 237 ++++ .../docs/tutorials/new_dataset.md | 141 ++ .../docs/tutorials/new_modules.md | 272 ++++ .../Resnet50_for_PyTorch/docs_zh-CN/Makefile | 20 + .../docs_zh-CN/_static/css/readthedocs.css | 16 + .../docs_zh-CN/_static/image/concat.JPEG | Bin 0 -> 45505 bytes .../docs_zh-CN/_static/image/original.JPEG | Bin 0 -> 9414 bytes .../docs_zh-CN/_static/image/pipeline.JPEG | Bin 0 -> 19054 bytes .../docs_zh-CN/community/CONTRIBUTING.md | 73 + .../Resnet50_for_PyTorch/docs_zh-CN/conf.py | 284 ++++ .../docs_zh-CN/getting_started.md | 228 +++ .../docs_zh-CN/install.md | 134 ++ .../docs_zh-CN/model_zoo.md | 1 + .../Resnet50_for_PyTorch/docs_zh-CN/stat.py | 107 ++ 
.../docs_zh-CN/tools/model_serving.md | 87 ++ .../docs_zh-CN/tools/onnx2tensorrt.md | 76 + .../docs_zh-CN/tools/pytorch2onnx.md | 89 ++ .../docs_zh-CN/tools/pytorch2torchscript.md | 55 + .../docs_zh-CN/tools/visualization.md | 82 ++ .../docs_zh-CN/tutorials/config.md | 405 ++++++ .../docs_zh-CN/tutorials/data_pipeline.md | 148 ++ .../docs_zh-CN/tutorials/finetune.md | 222 +++ .../docs_zh-CN/tutorials/new_dataset.md | 140 ++ .../docs_zh-CN/tutorials/new_modules.md | 281 ++++ .../Resnet50_for_PyTorch/mmcls/__init__.py | 60 + .../mmcls/apis/__init__.py | 9 + .../mmcls/apis/inference.py | 119 ++ .../Resnet50_for_PyTorch/mmcls/apis/test.py | 198 +++ .../Resnet50_for_PyTorch/mmcls/apis/train.py | 177 +++ .../mmcls/core/__init__.py | 4 + .../mmcls/core/evaluation/__init__.py | 12 + .../mmcls/core/evaluation/eval_hooks.py | 107 ++ .../mmcls/core/evaluation/eval_metrics.py | 248 ++++ .../mmcls/core/evaluation/mean_ap.py | 74 + .../evaluation/multilabel_eval_metrics.py | 72 + .../mmcls/core/export/__init__.py | 4 + .../mmcls/core/export/test.py | 96 ++ .../mmcls/core/fp16/__init__.py | 5 + .../mmcls/core/fp16/decorators.py | 161 +++ .../mmcls/core/fp16/hooks.py | 129 ++ .../mmcls/core/fp16/utils.py | 24 + .../mmcls/core/utils/__init__.py | 5 + .../mmcls/core/utils/dist_utils.py | 57 + .../mmcls/core/utils/misc.py | 8 + .../mmcls/core/visualization/__init__.py | 7 + .../mmcls/core/visualization/image.py | 326 +++++ .../mmcls/datasets/__init__.py | 19 + .../mmcls/datasets/base_dataset.py | 206 +++ .../mmcls/datasets/builder.py | 122 ++ .../mmcls/datasets/cifar.py | 133 ++ .../mmcls/datasets/dataset_wrappers.py | 172 +++ .../mmcls/datasets/imagenet.py | 1103 +++++++++++++++ .../mmcls/datasets/imagenet21k.py | 141 ++ .../mmcls/datasets/mnist.py | 185 +++ .../mmcls/datasets/multi_label.py | 83 ++ .../mmcls/datasets/pipelines/__init__.py | 22 + .../mmcls/datasets/pipelines/auto_augment.py | 921 ++++++++++++ .../mmcls/datasets/pipelines/compose.py | 43 + .../mmcls/datasets/pipelines/formating.py | 9 + .../mmcls/datasets/pipelines/formatting.py | 180 +++ .../mmcls/datasets/pipelines/loading.py | 70 + .../mmcls/datasets/pipelines/transforms.py | 1065 ++++++++++++++ .../mmcls/datasets/samplers/__init__.py | 4 + .../datasets/samplers/distributed_sampler.py | 43 + .../mmcls/datasets/utils.py | 153 ++ .../mmcls/datasets/voc.py | 69 + .../mmcls/models/__init__.py | 14 + .../mmcls/models/backbones/__init__.py | 29 + .../mmcls/models/backbones/alexnet.py | 56 + .../mmcls/models/backbones/base_backbone.py | 33 + .../mmcls/models/backbones/lenet.py | 42 + .../mmcls/models/backbones/mobilenet_v2.py | 264 ++++ .../mmcls/models/backbones/mobilenet_v3.py | 195 +++ .../mmcls/models/backbones/regnet.py | 312 +++++ .../mmcls/models/backbones/repvgg.py | 537 +++++++ .../mmcls/models/backbones/res2net.py | 306 ++++ .../mmcls/models/backbones/resnest.py | 339 +++++ .../mmcls/models/backbones/resnet.py | 651 +++++++++ .../mmcls/models/backbones/resnet_cifar.py | 81 ++ .../mmcls/models/backbones/resnext.py | 148 ++ .../mmcls/models/backbones/seresnet.py | 125 ++ .../mmcls/models/backbones/seresnext.py | 155 ++ .../mmcls/models/backbones/shufflenet_v1.py | 321 +++++ .../mmcls/models/backbones/shufflenet_v2.py | 297 ++++ .../models/backbones/swin_transformer.py | 401 ++++++ .../mmcls/models/backbones/t2t_vit.py | 367 +++++ .../mmcls/models/backbones/timm_backbone.py | 57 + .../mmcls/models/backbones/tnt.py | 367 +++++ .../mmcls/models/backbones/vgg.py | 183 +++ .../models/backbones/vision_transformer.py | 368 +++++ 
.../mmcls/models/builder.py | 38 + .../mmcls/models/classifiers/__init__.py | 5 + .../mmcls/models/classifiers/base.py | 215 +++ .../mmcls/models/classifiers/image.py | 141 ++ .../mmcls/models/heads/__init__.py | 12 + .../mmcls/models/heads/base_head.py | 15 + .../mmcls/models/heads/cls_head.py | 78 ++ .../mmcls/models/heads/linear_head.py | 54 + .../mmcls/models/heads/multi_label_head.py | 64 + .../models/heads/multi_label_linear_head.py | 59 + .../mmcls/models/heads/stacked_head.py | 137 ++ .../models/heads/vision_transformer_head.py | 87 ++ .../mmcls/models/losses/__init__.py | 17 + .../mmcls/models/losses/accuracy.py | 130 ++ .../mmcls/models/losses/asymmetric_loss.py | 112 ++ .../mmcls/models/losses/cross_entropy_loss.py | 189 +++ .../mmcls/models/losses/focal_loss.py | 114 ++ .../mmcls/models/losses/label_smooth_loss.py | 167 +++ .../mmcls/models/losses/seesaw_loss.py | 173 +++ .../mmcls/models/losses/utils.py | 121 ++ .../mmcls/models/necks/__init__.py | 4 + .../mmcls/models/necks/gap.py | 45 + .../mmcls/models/utils/__init__.py | 16 + .../mmcls/models/utils/attention.py | 370 +++++ .../mmcls/models/utils/augment/__init__.py | 7 + .../mmcls/models/utils/augment/augments.py | 73 + .../mmcls/models/utils/augment/builder.py | 8 + .../mmcls/models/utils/augment/cutmix.py | 140 ++ .../mmcls/models/utils/augment/identity.py | 30 + .../mmcls/models/utils/augment/mixup.py | 57 + .../mmcls/models/utils/channel_shuffle.py | 29 + .../mmcls/models/utils/embed.py | 253 ++++ .../mmcls/models/utils/helpers.py | 42 + .../mmcls/models/utils/inverted_residual.py | 114 ++ .../mmcls/models/utils/make_divisible.py | 25 + .../mmcls/models/utils/se_layer.py | 74 + .../mmcls/utils/__init__.py | 5 + .../mmcls/utils/collect_env.py | 17 + .../mmcls/utils/logger.py | 8 + .../Resnet50_for_PyTorch/mmcls/version.py | 28 + .../Resnet50_for_PyTorch/model-index.yml | 15 + .../Resnet50_for_PyTorch/requirements.txt | 3 + .../requirements/docs.txt | 7 + .../requirements/mminstall.txt | 1 + .../requirements/optional.txt | 2 + .../requirements/readthedocs.txt | 3 + .../requirements/runtime.txt | 3 + .../requirements/tests.txt | 8 + .../Resnet50_for_PyTorch/setup.cfg | 24 + .../Resnet50_for_PyTorch/setup.py | 174 +++ .../Resnet50_for_PyTorch/test/env_npu.sh | 55 + .../Resnet50_for_PyTorch/test/set_conda.sh | 2 + .../test/train_ID3915_performance_8p.sh | 141 ++ .../test/train_ID3918_performance_8p.sh | 142 ++ .../tests/data/dataset/ann.txt | 3 + .../tests/data/dataset/b/2.jpeg | 0 .../tests/data/retinanet.py | 82 ++ .../test_data/test_datasets/test_common.py | 295 ++++ .../test_datasets/test_dataset_utils.py | 22 + .../test_datasets/test_dataset_wrapper.py | 84 ++ .../test_pipelines/test_auto_augment.py | 1241 +++++++++++++++++ .../test_data/test_pipelines/test_loading.py | 59 + .../test_pipelines/test_transform.py | 1188 ++++++++++++++++ .../test_downstream/test_mmdet_inference.py | 96 ++ .../tests/test_metrics/test_losses.py | 303 ++++ .../tests/test_metrics/test_metrics.py | 57 + .../test_backbones/test_mobilenet_v2.py | 259 ++++ .../test_backbones/test_mobilenet_v3.py | 175 +++ .../test_models/test_backbones/test_regnet.py | 94 ++ .../test_models/test_backbones/test_repvgg.py | 293 ++++ .../test_backbones/test_res2net.py | 71 + .../test_backbones/test_resnest.py | 44 + .../test_models/test_backbones/test_resnet.py | 566 ++++++++ .../test_backbones/test_resnet_cifar.py | 67 + .../test_backbones/test_resnext.py | 61 + .../test_backbones/test_seresnet.py | 247 ++++ .../test_backbones/test_seresnext.py | 74 + 
.../test_backbones/test_shufflenet_v1.py | 246 ++++ .../test_backbones/test_shufflenet_v2.py | 205 +++ .../test_backbones/test_swin_transformer.py | 168 +++ .../test_backbones/test_t2t_vit.py | 84 ++ .../test_backbones/test_timm_backbone.py | 43 + .../test_models/test_backbones/test_tnt.py | 50 + .../test_models/test_backbones/test_vgg.py | 139 ++ .../test_backbones/test_vision_transformer.py | 162 +++ .../tests/test_models/test_classifiers.py | 296 ++++ .../tests/test_models/test_heads.py | 152 ++ .../tests/test_models/test_neck.py | 39 + .../test_models/test_utils/test_attention.py | 178 +++ .../test_models/test_utils/test_augment.py | 52 + .../test_models/test_utils/test_embed.py | 83 ++ .../test_utils/test_inverted_residual.py | 82 ++ .../tests/test_models/test_utils/test_misc.py | 60 + .../tests/test_models/test_utils/test_se.py | 94 ++ .../tests/test_runtime/test_eval_hook.py | 219 +++ .../tests/test_utils/test_version_utils.py | 21 + .../tests/test_utils/test_visualization.py | 106 ++ .../tools/analysis_tools/analyze_logs.py | 183 +++ .../tools/analysis_tools/analyze_results.py | 126 ++ .../tools/analysis_tools/eval_metric.py | 75 + .../tools/analysis_tools/get_flops.py | 55 + .../convert_models/mobilenetv2_to_mmcls.py | 135 ++ .../tools/convert_models/publish_model.py | 55 + .../convert_models/reparameterize_repvgg.py | 46 + .../tools/convert_models/repvgg_to_mmcls.py | 59 + .../convert_models/shufflenetv2_to_mmcls.py | 113 ++ .../tools/convert_models/vgg_to_mmcls.py | 117 ++ .../tools/deployment/mmcls2torchserve.py | 111 ++ .../tools/deployment/mmcls_handler.py | 51 + .../tools/deployment/onnx2tensorrt.py | 142 ++ .../tools/deployment/pytorch2onnx.py | 233 ++++ .../tools/deployment/pytorch2torchscript.py | 139 ++ .../tools/deployment/test.py | 116 ++ .../tools/deployment/test_torchserver.py | 44 + .../Resnet50_for_PyTorch/tools/dist_test.sh | 10 + .../Resnet50_for_PyTorch/tools/dist_train.sh | 9 + .../tools/misc/print_config.py | 55 + .../tools/misc/verify_dataset.py | 131 ++ .../Resnet50_for_PyTorch/tools/slurm_test.sh | 24 + .../Resnet50_for_PyTorch/tools/slurm_train.sh | 24 + .../Resnet50_for_PyTorch/tools/test.py | 219 +++ .../Resnet50_for_PyTorch/tools/train.py | 185 +++ .../tools/visualizations/vis_pipeline.py | 257 ++++ 490 files changed, 39275 insertions(+) create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.gitignore create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.pre-commit-config.yaml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.readthedocs.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/CITATION.cff create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/LICENSE create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/MANIFEST.in create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README_zh-CN.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar100_bs16.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar10_bs16.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet21k_bs128.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32.py create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32_pil_resize.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_autoaug.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_384.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_t2t_224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/auto_aug.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/rand_aug.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/voc_bs16.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/default_runtime.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v2_1x.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_large_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_1.6gf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_3.2gf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_4.0gf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_400mf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_6.4gf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_8.0gf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_800mf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-A0_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net101-w26-s4.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w14-s8.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s4.py create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s6.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s8.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w48-s2.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest101.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest200.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest269.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest50.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101_cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152_cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18_cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34_cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_cutmix.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_mixup.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cutmix.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_label_smooth.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_mixup.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d101.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d152.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d50.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x4d.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x8d.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext152_32x4d.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext50_32x4d.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet101.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet50.py 
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext101_32x4d.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext50_32x4d.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v1_1x.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v2_1x.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_384.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_384.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/small_224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/tiny_224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-14.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-19.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-24.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/tnt_s_patch16_224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11bn.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13bn.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16bn.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19bn.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p16.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p32.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p16.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p32.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/cifar10_bs128.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_coslr.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_AdamW.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_coslr.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_140e.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_coslr.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_epochstep.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs4096_AdamW.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/lenet5_mnist.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_1.6gf_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_12gf_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_3.2gf_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_4.0gf_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_400mf_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_6.4gf_b32x8_imagenet.py 
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_8.0gf_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_800mf_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net101-w26-s4_8xb32_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w14-s8_8xb32_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w26-s8_8xb32_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest101_b64x32_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest200_b32x64_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest269_b32x64_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest50_b64x32_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b16x8_cifar10.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b16x8_cifar10.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b16x8_cifar10.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b16x8_cifar10.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar100.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10_mixup.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_coslr_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_cutmix_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_imagenet.py create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_mixup_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d101_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d152_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d50_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x4d_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x8d_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext152_32x4d_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext50_32x4d_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet101_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet50_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/metafile.yml create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_384_evalonly_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_224_evalonly_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_384_evalonly_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11bn_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13bn_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b16x8_voc.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16bn_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19bn_b32x8_imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/README.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/metafile.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py 
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/demo/demo.JPEG create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/demo/image_demo.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docker/Dockerfile create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docker/serve/Dockerfile create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docker/serve/config.properties create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docker/serve/entrypoint.sh create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/Makefile create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/css/readthedocs.css create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/concat.JPEG create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/original.JPEG create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/pipeline.JPEG create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/js/custom.js create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/changelog.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/community/CONTRIBUTING.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/conf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/getting_started.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/install.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/model_zoo.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/stat.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/model_serving.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/onnx2tensorrt.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2onnx.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2torchscript.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/visualization.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/config.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/data_pipeline.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/finetune.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_dataset.md create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_modules.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/Makefile create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/css/readthedocs.css create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/concat.JPEG create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/original.JPEG create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/pipeline.JPEG create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/community/CONTRIBUTING.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/conf.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/getting_started.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/install.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/model_zoo.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/stat.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/model_serving.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/onnx2tensorrt.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2onnx.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2torchscript.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/visualization.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/config.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/data_pipeline.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/finetune.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_dataset.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_modules.md create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/inference.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/test.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/train.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_hooks.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_metrics.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/mean_ap.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/multilabel_eval_metrics.py create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/test.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/decorators.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/hooks.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/utils.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/dist_utils.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/misc.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/image.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/base_dataset.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/builder.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/dataset_wrappers.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet21k.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/mnist.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/multi_label.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/auto_augment.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/compose.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formating.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formatting.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/loading.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/transforms.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/distributed_sampler.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/utils.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/voc.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/__init__.py create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/alexnet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/base_backbone.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/lenet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v2.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v3.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/regnet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/repvgg.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/res2net.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnest.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet_cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnext.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnext.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v1.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v2.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/swin_transformer.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/t2t_vit.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/timm_backbone.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/tnt.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vgg.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vision_transformer.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/builder.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/base.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/image.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/base_head.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/cls_head.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/linear_head.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_head.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_linear_head.py 
create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/stacked_head.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/vision_transformer_head.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/accuracy.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/asymmetric_loss.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/cross_entropy_loss.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/focal_loss.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/label_smooth_loss.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/seesaw_loss.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/utils.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/gap.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/attention.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/augments.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/builder.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/cutmix.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/identity.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/mixup.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/channel_shuffle.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/embed.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/helpers.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/inverted_residual.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/make_divisible.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/se_layer.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/__init__.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/collect_env.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/logger.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/version.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/model-index.yml create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements.txt create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/docs.txt create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/mminstall.txt create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/optional.txt create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/readthedocs.txt create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/runtime.txt create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/tests.txt create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.cfg create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/env_npu.sh create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/set_conda.sh create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3915_performance_8p.sh create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3918_performance_8p.sh create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/dataset/ann.txt create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/dataset/b/2.jpeg create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/retinanet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_common.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_utils.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_wrapper.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_auto_augment.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_loading.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_transform.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_downstream/test_mmdet_inference.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_losses.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_metrics.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v2.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v3.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_regnet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_repvgg.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_res2net.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnest.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet.py create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet_cifar.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnext.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnet.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnext.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v1.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v2.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_swin_transformer.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_t2t_vit.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_timm_backbone.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_tnt.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vgg.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vision_transformer.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_classifiers.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_heads.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_neck.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_attention.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_augment.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_embed.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_inverted_residual.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_misc.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_se.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_runtime/test_eval_hook.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_version_utils.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_visualization.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_logs.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_results.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/eval_metric.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/get_flops.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/mobilenetv2_to_mmcls.py create mode 100644 
PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/publish_model.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/reparameterize_repvgg.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/repvgg_to_mmcls.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/shufflenetv2_to_mmcls.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/vgg_to_mmcls.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls2torchserve.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls_handler.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/onnx2tensorrt.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2onnx.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2torchscript.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test_torchserver.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_test.sh create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_train.sh create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/print_config.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/verify_dataset.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_test.sh create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_train.sh create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/test.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/train.py create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/visualizations/vis_pipeline.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.gitignore b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.gitignore new file mode 100644 index 0000000000..786a839695 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.gitignore @@ -0,0 +1,118 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +**/*.pyc + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# custom +/data +.vscode +.idea +*.pkl +*.pkl.json +*.log.json +/work_dirs +/mmcls/.mim + +# Pytorch +*.pth diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.pre-commit-config.yaml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.pre-commit-config.yaml new file mode 100644 index 0000000000..19e9f8d481 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.pre-commit-config.yaml @@ -0,0 +1,54 @@ +exclude: ^tests/data/ +repos: + - repo: https://gitlab.com/pycqa/flake8.git + rev: 3.8.3 + hooks: + - id: flake8 + - repo: https://github.com/asottile/seed-isort-config + rev: v2.2.0 + hooks: + - id: seed-isort-config + - repo: https://github.com/timothycrosley/isort + rev: 4.3.21 + hooks: + - id: isort + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.30.0 + hooks: + - id: yapf + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.1.0 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: end-of-file-fixer + - id: requirements-txt-fixer + - id: double-quote-string-fixer + - id: check-merge-conflict + - id: fix-encoding-pragma + args: ["--remove"] + - id: mixed-line-ending + args: ["--fix=lf"] + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 2.1.4 + hooks: + - id: markdownlint + args: ["-r", "~MD002,~MD013,~MD029,~MD033,~MD034", + "-t", "allow_different_nesting"] + - repo: https://github.com/codespell-project/codespell + rev: v2.1.0 + hooks: + - id: codespell + - repo: https://github.com/myint/docformatter + rev: v1.3.1 + hooks: + - id: docformatter + args: ["--in-place", "--wrap-descriptions", "79"] + # - repo: local + # hooks: + # - id: clang-format + # name: clang-format + # description: Format files with ClangFormat + # entry: clang-format -style=google -i + # language: system + # files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$ diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.readthedocs.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.readthedocs.yml new file mode 100644 index 0000000000..6cfbf5d310 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/.readthedocs.yml @@ -0,0 +1,9 @@ +version: 2 + +formats: all + +python: + version: 3.7 + install: + - requirements: requirements/docs.txt + - requirements: requirements/readthedocs.txt diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/CITATION.cff b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/CITATION.cff new file mode 100644 index 0000000000..0c0d773021 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/CITATION.cff @@ -0,0 +1,9 @@ +cff-version: 1.2.0 +message: "If you use this software, 
please cite it as below." +title: "OpenMMLab's Image Classification Toolbox and Benchmark" +authors: + - name: "MMClassification Contributors" +version: 0.15.0 +date-released: 2020-07-09 +repository-code: "https://github.com/open-mmlab/mmclassification" +license: Apache-2.0 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/LICENSE b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/LICENSE new file mode 100644 index 0000000000..f731325b2c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/LICENSE @@ -0,0 +1,203 @@ +Copyright (c) OpenMMLab. All rights reserved + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 MMClassification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/MANIFEST.in b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/MANIFEST.in new file mode 100644 index 0000000000..c4ce6d7f6c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/MANIFEST.in @@ -0,0 +1,3 @@ +include mmcls/.mim/model-index.yml +recursive-include mmcls/.mim/configs *.py *.yml +recursive-include mmcls/.mim/tools *.py *.sh diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README.md new file mode 100644 index 0000000000..12209eefc8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README.md @@ -0,0 +1,104 @@ +
+ +
+ +[![Build Status](https://github.com/open-mmlab/mmclassification/workflows/build/badge.svg)](https://github.com/open-mmlab/mmclassification/actions) +[![Documentation Status](https://readthedocs.org/projects/mmclassification/badge/?version=latest)](https://mmclassification.readthedocs.io/en/latest/?badge=latest) +[![codecov](https://codecov.io/gh/open-mmlab/mmclassification/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmclassification) +[![license](https://img.shields.io/github/license/open-mmlab/mmclassification.svg)](https://github.com/open-mmlab/mmclassification/blob/master/LICENSE) + +## Introduction + +English | [简体中文](/README_zh-CN.md) + +MMClassification is an open source image classification toolbox based on PyTorch. It is +a part of the [OpenMMLab](https://openmmlab.com/) project. + +Documentation: https://mmclassification.readthedocs.io/en/latest/ + +![demo](https://user-images.githubusercontent.com/9102141/87268895-3e0d0780-c4fe-11ea-849e-6140b7e0d4de.gif) + +### Major features + +- Various backbones and pretrained models +- Bag of training tricks +- Large-scale training configs +- High efficiency and extensibility + +## License + +This project is released under the [Apache 2.0 license](LICENSE). + +## Changelog + +v0.17.0 was released on 29/10/2021. + +Highlights of the new version: +- Support the **Tokens-to-Token ViT** and **Res2Net** backbones. Welcome to try them! +- Support the **ImageNet21k** dataset. +- Add a **pipeline visualization** tool. Try it with the [tutorials](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#pipeline-visualization)! + +Please refer to [changelog.md](docs/changelog.md) for more details and other release history. + +## Benchmark and model zoo + +Results and models are available in the [model zoo](docs/model_zoo.md). + +Supported backbones: + +- [x] ResNet +- [x] ResNeXt +- [x] SE-ResNet +- [x] SE-ResNeXt +- [x] RegNet +- [x] ShuffleNetV1 +- [x] ShuffleNetV2 +- [x] MobileNetV2 +- [x] MobileNetV3 +- [x] Swin-Transformer + +## Installation + +Please refer to [install.md](docs/install.md) for installation and dataset preparation. + +## Getting Started + +Please see [getting_started.md](docs/getting_started.md) for the basic usage of MMClassification. There are also tutorials for [finetuning models](docs/tutorials/finetune.md), [adding new datasets](docs/tutorials/new_dataset.md), [designing data pipelines](docs/tutorials/data_pipeline.md), and [adding new modules](docs/tutorials/new_modules.md). A minimal usage sketch is also included after the Acknowledgement section below. + +## Citation + +If you find this project useful in your research, please consider citing: + +```BibTeX +@misc{2020mmclassification, + title={OpenMMLab's Image Classification Toolbox and Benchmark}, + author={MMClassification Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmclassification}}, + year={2020} +} +``` + +## Contributing + +We appreciate all contributions to improve MMClassification. +Please refer to [CONTRIBUTING.md](docs/community/CONTRIBUTING.md) for the contributing guidelines. + +## Acknowledgement + +MMClassification is an open source project contributed to by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback. +We hope that the toolbox and benchmark can serve the growing research community by providing a flexible toolkit for reimplementing existing methods and developing new classifiers.
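To make the Getting Started pointers in the README above a little more concrete, here is a minimal, hedged usage sketch. The `init_model` and `inference_model` helpers are the standard `mmcls.apis` entry points provided by `mmcls/apis/inference.py` in this tree; the config and checkpoint paths are illustrative placeholders, not files guaranteed by this patch.

```python
# Minimal usage sketch. Assumptions (not guaranteed by this patch): an MMClassification
# 0.x environment installed per docs/install.md, and a checkpoint downloaded locally;
# the config and checkpoint paths below are illustrative placeholders.
from mmcls.apis import inference_model, init_model

config_file = 'configs/resnet/resnet50_b32x8_imagenet.py'  # hypothetical config path
checkpoint_file = 'checkpoints/resnet50_imagenet.pth'      # hypothetical checkpoint path

# Build the classifier from the config and load the weights.
model = init_model(config_file, checkpoint_file, device='cpu')

# Single-image inference on the demo image shipped under demo/.
result = inference_model(model, 'demo/demo.JPEG')
print(result)  # dict with 'pred_label', 'pred_score' and 'pred_class'
```

Training is driven from the same configs, e.g. `python tools/train.py <config>` on a single device or `bash tools/dist_train.sh <config> <num_gpus>` for distributed runs (both scripts are added by this patch). A top-level config is typically just a composition of the `_base_` fragments that appear later in this patch, roughly:

```python
# Rough sketch of how a top-level config composes the _base_ fragments added in this
# patch (the exact top-level filename varies between releases).
_base_ = [
    '../_base_/models/resnet50.py',           # model definition
    '../_base_/datasets/imagenet_bs32.py',    # dataset and pipeline settings
    '../_base_/schedules/imagenet_bs256.py',  # optimizer and learning-rate schedule
    '../_base_/default_runtime.py',           # checkpointing, logging, runtime hooks
]
```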
+ +## Projects in OpenMMLab + +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. +- [MIM](https://github.com/open-mmlab/mim): MIM Installs OpenMMLab Packages. +- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark. +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark. +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox. +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab toolbox for text detection, recognition and understanding. +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab toolkit for generative models. diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README_zh-CN.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README_zh-CN.md new file mode 100644 index 0000000000..206b9771b8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/README_zh-CN.md @@ -0,0 +1,108 @@ +
+ +
+ +[English](/README.md) | 简体中文 + +[![Build Status](https://github.com/open-mmlab/mmclassification/workflows/build/badge.svg)](https://github.com/open-mmlab/mmclassification/actions) +[![Documentation Status](https://readthedocs.org/projects/mmclassification/badge/?version=latest)](https://mmclassification.readthedocs.io/en/latest/?badge=latest) +[![codecov](https://codecov.io/gh/open-mmlab/mmclassification/branch/master/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmclassification) +[![license](https://img.shields.io/github/license/open-mmlab/mmclassification.svg)](https://github.com/open-mmlab/mmclassification/blob/master/LICENSE) + +## Introduction + +MMClassification 是一款基于 PyTorch 的开源图像分类工具箱,是 [OpenMMLab](https://openmmlab.com/) 项目的成员之一 + +参考文档:https://mmclassification.readthedocs.io/en/latest/ + +![demo](https://user-images.githubusercontent.com/9102141/87268895-3e0d0780-c4fe-11ea-849e-6140b7e0d4de.gif) + +### 主要特性 + +- 支持多样的主干网络与预训练模型 +- 支持配置多种训练技巧 +- 大量的训练配置文件 +- 高效率和高可扩展性 + +## 许可证 + +该项目开源自 [Apache 2.0 license](LICENSE). + +## 更新日志 + +2021/10/29 发布了 v0.17.0 版本 + +新版本的一些新功能如下: +- 支持了 **Tokens-to-Token ViT** 主干网络和 **Res2Net** 主干网络,欢迎使用! +- 支持了 **ImageNet21k** 数据集 +- 添加了一个**可视化数据预处理**的工具,可以参考[教程](https://mmclassification.readthedocs.io/zh_CN/latest/tools/visualization.html#id2)使用 + +发布历史和更新细节请参考 [更新日志](docs/changelog.md) + +## 基准测试及模型库 + +相关结果和模型可在 [model zoo](docs/model_zoo.md) 中获得 + +支持的主干网络: + +- [x] ResNet +- [x] ResNeXt +- [x] SE-ResNet +- [x] SE-ResNeXt +- [x] RegNet +- [x] ShuffleNetV1 +- [x] ShuffleNetV2 +- [x] MobileNetV2 +- [x] MobileNetV3 +- [x] Swin-Transformer + +## 安装 + +请参考 [安装指南](docs_zh-CN/install.md) 进行安装 + +## 基础教程 + +请参考 [基础教程](docs_zh-CN/getting_started.md) 来了解 MMClassification 的基本使用。其中还包含了 [如何微调模型](docs_zh-CN/tutorials/finetune.md), [如何增加新数据集](docs_zh-CN/tutorials/new_dataset.md), [如何设计数据处理流程](docs_zh-CN/tutorials/data_pipeline.md), 以及 [如何增加新模块](docs_zh-CN/tutorials/new_modules.md) 等指南。 + +## 参与贡献 + +我们非常欢迎任何有助于提升 MMClassification 的贡献,请参考 [贡献指南](docs_zh-CN/community/CONTRIBUTING.md) 来了解如何参与贡献。 + +## 致谢 + +MMClassification 是一款由不同学校和公司共同贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 + +我们希望该工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现现有算法并开发自己的新模型,从而不断为开源社区提供贡献。 + +## OpenMMLab 的其他项目 + +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库 +- [MIM](https://github.com/open-mmlab/mim): MIM 是 OpenMMlab 项目、算法、模型的统一入口 +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 检测工具箱与测试基准 +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台 +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱与测试基准 +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱与测试基准 +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台 +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱与测试基准 +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱 +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包 +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 生成模型工具箱 + +## 欢迎加入 OpenMMLab 社区 + +扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=GJP18SjI) + +
+ +
+ +我们会在 OpenMMLab 社区为大家 + +- 📢 分享 AI 框架的前沿核心技术 +- 💻 解读 PyTorch 常用模块源码 +- 📰 发布 OpenMMLab 的相关新闻 +- 🚀 介绍 OpenMMLab 开发的前沿算法 +- 🏃 获取更高效的问题答疑和意见反馈 +- 🔥 提供与各行各业开发者充分交流的平台 + +干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar100_bs16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar100_bs16.py new file mode 100644 index 0000000000..d4f8db75f8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar100_bs16.py @@ -0,0 +1,36 @@ +# dataset settings +dataset_type = 'CIFAR100' +img_norm_cfg = dict( + mean=[129.304, 124.070, 112.434], + std=[68.170, 65.392, 70.418], + to_rgb=False) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=16, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/cifar100', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/cifar100', + pipeline=test_pipeline, + test_mode=True), + test=dict( + type=dataset_type, + data_prefix='data/cifar100', + pipeline=test_pipeline, + test_mode=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar10_bs16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar10_bs16.py new file mode 100644 index 0000000000..0d28adf5bf --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/cifar10_bs16.py @@ -0,0 +1,35 @@ +# dataset settings +dataset_type = 'CIFAR10' +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=16, + workers_per_gpu=2, + train=dict( + type=dataset_type, data_prefix='data/cifar10', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/cifar10', + pipeline=test_pipeline, + test_mode=True), + test=dict( + type=dataset_type, + data_prefix='data/cifar10', + pipeline=test_pipeline, + test_mode=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet21k_bs128.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet21k_bs128.py new file mode 100644 index 0000000000..b81a7466f4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet21k_bs128.py @@ -0,0 +1,43 @@ +# dataset settings +dataset_type = 'ImageNet21k' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + 
dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=128, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet21k/train', + pipeline=train_pipeline, + recursion_subdir=True), + val=dict( + type=dataset_type, + data_prefix='data/imagenet21k/val', + ann_file='data/imagenet21k/meta/val.txt', + pipeline=test_pipeline, + recursion_subdir=True), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet21k/val', + ann_file='data/imagenet21k/meta/val.txt', + pipeline=test_pipeline, + recursion_subdir=True)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32.py new file mode 100644 index 0000000000..8a5465902a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32.py @@ -0,0 +1,40 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32_pil_resize.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32_pil_resize.py new file mode 100644 index 0000000000..22b74f76b1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs32_pil_resize.py @@ -0,0 +1,40 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64.py new file mode 100644 index 0000000000..b9f866a404 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64.py @@ -0,0 +1,40 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_autoaug.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_autoaug.py new file mode 100644 index 0000000000..a1092a3124 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_autoaug.py @@ -0,0 +1,43 @@ +_base_ = ['./pipelines/auto_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, 
direction='horizontal'), + dict(type='AutoAugment', policies={{_base_.auto_increasing_policies}}), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize.py new file mode 100644 index 0000000000..95d0e1f25a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize.py @@ -0,0 +1,40 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py new file mode 100644 index 0000000000..f9c50267af --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py @@ -0,0 +1,45 @@ +_base_ = [ + 'pipelines/auto_aug.py', +] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies={{_base_.policy_imagenet}}), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_224.py new file mode 100644 index 0000000000..4a059a3313 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_224.py @@ -0,0 +1,71 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(256, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git 
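For illustration only (not part of the diff): the RandAugment step configured above draws num_policies=2 transforms per image and scales each one through magnitude_level=9 out of total_level=10, with Gaussian jitter magnitude_std=0.5. The sketch below shows roughly how such a level could be mapped onto a transform's magnitude_range; the function name and clamping details are assumptions, not the mmcls implementation.

    import random

    def sample_magnitude(magnitude_range, magnitude_level=9, total_level=10,
                         magnitude_std=0.5):
        # Jitter the configured level, clamp it, then interpolate the range.
        low, high = magnitude_range
        level = random.gauss(magnitude_level, magnitude_std)
        level = min(max(level, 0.0), float(total_level))
        return low + (high - low) * level / total_level

    # Rotate is declared with magnitude_range=(0, 30) in rand_aug.py, so a
    # level of 9/10 yields an angle of roughly 27 degrees on average.
    print(sample_magnitude((0, 30)))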
a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_384.py new file mode 100644 index 0000000000..d263939929 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_swin_384.py @@ -0,0 +1,43 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=384, backend='pillow', interpolation='bicubic'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=10, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_t2t_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_t2t_224.py new file mode 100644 index 0000000000..375775debd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/imagenet_bs64_t2t_224.py @@ -0,0 +1,71 @@ +_base_ = ['./pipelines/rand_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies={{_base_.rand_increasing_policies}}, + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + 
samples_per_gpu=64, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/auto_aug.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/auto_aug.py new file mode 100644 index 0000000000..5a10f7eec6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/auto_aug.py @@ -0,0 +1,96 @@ +# Policy for ImageNet, refers to +# https://github.com/DeepVoltaire/AutoAugment/blame/master/autoaugment.py +policy_imagenet = [ + [ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], + [ + dict(type='Posterize', bits=5, prob=0.6), + dict(type='Posterize', bits=5, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8) + ], + [ + dict(type='Solarize', thr=256 / 9 * 6, prob=0.6), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Posterize', bits=6, prob=0.8), + dict(type='Equalize', prob=1.)], + [ + dict(type='Rotate', angle=10., prob=0.2), + dict(type='Solarize', thr=256 / 9, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.6), + dict(type='Posterize', bits=5, prob=0.4) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0., prob=0.4) + ], + [ + dict(type='Rotate', angle=30., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Equalize', prob=0.0), + dict(type='Equalize', prob=0.8)], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0.2, prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0.8, prob=0.8), + dict(type='Solarize', thr=256 / 9 * 2, prob=0.8) + ], + [ + dict(type='Sharpness', magnitude=0.7, prob=0.4), + dict(type='Invert', prob=0.6) + ], + [ + dict( + type='Shear', + magnitude=0.3 / 9 * 5, + prob=0.6, + direction='horizontal'), + dict(type='Equalize', prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) 
+ ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/rand_aug.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/rand_aug.py new file mode 100644 index 0000000000..f2bab3c364 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/pipelines/rand_aug.py @@ -0,0 +1,43 @@ +# Refers to `_RAND_INCREASING_TRANSFORMS` in pytorch-image-models +rand_increasing_policies = [ + dict(type='AutoContrast'), + dict(type='Equalize'), + dict(type='Invert'), + dict(type='Rotate', magnitude_key='angle', magnitude_range=(0, 30)), + dict(type='Posterize', magnitude_key='bits', magnitude_range=(4, 0)), + dict(type='Solarize', magnitude_key='thr', magnitude_range=(256, 0)), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110)), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(0, 0.9)), + dict(type='Contrast', magnitude_key='magnitude', magnitude_range=(0, 0.9)), + dict( + type='Brightness', magnitude_key='magnitude', + magnitude_range=(0, 0.9)), + dict( + type='Sharpness', magnitude_key='magnitude', magnitude_range=(0, 0.9)), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='horizontal'), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='vertical'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.45), + direction='horizontal'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.45), + direction='vertical') +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/voc_bs16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/voc_bs16.py new file mode 100644 index 0000000000..73fa0bcc8b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/datasets/voc_bs16.py @@ -0,0 +1,41 @@ +# dataset settings +dataset_type = 'VOC' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=16, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/VOCdevkit/VOC2007/', + ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/VOCdevkit/VOC2007/', + ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_prefix='data/VOCdevkit/VOC2007/', + ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt', + pipeline=test_pipeline)) +evaluation = dict( + interval=1, metric=['mAP', 'CP', 'OP', 'CR', 'OR', 'CF1', 'OF1']) diff --git 
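For illustration only (not part of the diff): auto_aug.py and rand_aug.py above only define plain Python lists (policy_imagenet, rand_increasing_policies), and the {{_base_.*}} placeholders in the dataset configs are substituted with those lists when the config is loaded. Below is a minimal sketch of how such _base_ fragments are combined into a trainable config; the file name and relative paths follow the usual mmcls layout and are assumptions that may need adjusting for this repository.

    # sketch of a top-level config, e.g. resnet50_b32x8_imagenet.py
    _base_ = [
        '../_base_/models/resnet50.py',
        '../_base_/datasets/imagenet_bs32.py',
        '../_base_/schedules/imagenet_bs256.py',
        '../_base_/default_runtime.py',
    ]

    # Keys from the base files can still be overridden afterwards, e.g. to
    # log more often or point at a local copy of the dataset.
    log_config = dict(interval=50)
    data = dict(train=dict(data_prefix='/path/to/imagenet/train'))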
a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/default_runtime.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/default_runtime.py new file mode 100644 index 0000000000..ba965a4547 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/default_runtime.py @@ -0,0 +1,16 @@ +# checkpoint saving +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=100, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable + +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v2_1x.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v2_1x.py new file mode 100644 index 0000000000..6ebff1eff9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v2_1x.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV2', widen_factor=1.0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_large_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_large_imagenet.py new file mode 100644 index 0000000000..b6fdafab6e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_large_imagenet.py @@ -0,0 +1,14 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='large'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=1000, + in_channels=960, + mid_channels=[1280], + dropout_rate=0.2, + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_cifar.py new file mode 100644 index 0000000000..5dbe980c47 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_cifar.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='small'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=10, + in_channels=576, + mid_channels=[1280], + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_imagenet.py new file mode 100644 index 0000000000..5b8af1f9ac --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/mobilenet_v3_small_imagenet.py @@ -0,0 +1,14 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='small'), + 
neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=1000, + in_channels=576, + mid_channels=[1024], + dropout_rate=0.2, + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_1.6gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_1.6gf.py new file mode 100644 index 0000000000..b81f0ad25b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_1.6gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_1.6gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=912, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py new file mode 100644 index 0000000000..383d4f8799 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_12gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2240, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_3.2gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_3.2gf.py new file mode 100644 index 0000000000..67d4541395 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_3.2gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_3.2gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1008, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_4.0gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_4.0gf.py new file mode 100644 index 0000000000..01419c64bd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_4.0gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_4.0gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1360, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_400mf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_400mf.py new file mode 100644 index 0000000000..ef518b9f7d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_400mf.py @@ -0,0 +1,12 @@ +# model 
settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_400mf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_6.4gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_6.4gf.py new file mode 100644 index 0000000000..44e6222af0 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_6.4gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_6.4gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1624, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_8.0gf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_8.0gf.py new file mode 100644 index 0000000000..29298268d7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_8.0gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_8.0gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1920, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_800mf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_800mf.py new file mode 100644 index 0000000000..210f760fe2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/regnet/regnetx_800mf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_800mf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=672, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-A0_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-A0_in1k.py new file mode 100644 index 0000000000..093ffb7eea --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-A0_in1k.py @@ -0,0 +1,15 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepVGG', + arch='A0', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py new file mode 100644 index 0000000000..5bb07db54d --- /dev/null +++ 
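For illustration only (not part of the diff): in each head above, in_channels must equal the channel width of the backbone's final feature map (for example 1280 for RepVGG-A0 and 672 for RegNetX-800MF), because GlobalAveragePooling collapses the spatial dimensions before the linear classifier. A rough PyTorch sketch of that shape flow, using plain torch modules rather than the mmcls classes:

    import torch
    import torch.nn as nn

    feats = torch.randn(8, 1280, 7, 7)        # e.g. RepVGG-A0 final feature map
    pooled = nn.AdaptiveAvgPool2d(1)(feats)   # GlobalAveragePooling -> (8, 1280, 1, 1)
    pooled = torch.flatten(pooled, 1)         # -> (8, 1280), must match in_channels
    logits = nn.Linear(1280, 1000)(pooled)    # LinearClsHead equivalent -> (8, 1000)
    print(logits.shape)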
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py @@ -0,0 +1,23 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepVGG', + arch='B3', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2560, + loss=dict( + type='LabelSmoothLoss', + loss_weight=1.0, + label_smooth_val=0.1, + mode='classy_vision', + num_classes=1000), + topk=(1, 5), + ), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net101-w26-s4.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net101-w26-s4.py new file mode 100644 index 0000000000..3bf64c508f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net101-w26-s4.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w14-s8.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w14-s8.py new file mode 100644 index 0000000000..5875142c34 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w14-s8.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=8, + base_width=14, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s4.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s4.py new file mode 100644 index 0000000000..be8fdb5859 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s4.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=4, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s6.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s6.py new file mode 100644 index 0000000000..281b136a67 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s6.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=6, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + 
loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s8.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s8.py new file mode 100644 index 0000000000..b4f62f3ed1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w26-s8.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=8, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w48-s2.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w48-s2.py new file mode 100644 index 0000000000..8675c91fa0 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/res2net50-w48-s2.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=2, + base_width=48, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest101.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest101.py new file mode 100644 index 0000000000..97f7749cc3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest101.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=101, + num_stages=4, + stem_channels=128, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False)) +train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest200.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest200.py new file mode 100644 index 0000000000..4610017814 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest200.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=200, + num_stages=4, + stem_channels=128, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False)) +train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest269.py 
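For illustration only (not part of the diff): the ResNeSt heads above train with LabelSmoothLoss and label_smooth_val=0.1, which replaces the one-hot target with a mixture of the one-hot vector and a uniform distribution before the cross-entropy is taken. A hedged sketch of that soft target (the real loss additionally supports several modes and reductions):

    import torch
    import torch.nn.functional as F

    def smooth_targets(labels, num_classes=1000, eps=0.1):
        # (1 - eps) of the mass on the true class, eps spread over all classes.
        one_hot = F.one_hot(labels, num_classes).float()
        return one_hot * (1 - eps) + eps / num_classes

    targets = smooth_targets(torch.tensor([3, 42]))
    print(targets[0, 3])        # ~0.9001 on the true class
    print(targets.sum(dim=1))   # every row still sums to 1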
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest269.py new file mode 100644 index 0000000000..ad365d03e1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest269.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=269, + num_stages=4, + stem_channels=128, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False)) +train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest50.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest50.py new file mode 100644 index 0000000000..15269d4a82 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnest50.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False)) +train_cfg = dict(mixup=dict(alpha=0.2, num_classes=1000)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101.py new file mode 100644 index 0000000000..1147cd4be9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101_cifar.py new file mode 100644 index 0000000000..a84d470e3a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet101_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152.py new file mode 100644 index 0000000000..94a718c3ce --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152.py @@ -0,0 +1,17 @@ +# model settings +model = 
dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=152, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152_cifar.py new file mode 100644 index 0000000000..55c0cc6c66 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet152_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=152, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18.py new file mode 100644 index 0000000000..7c66758ee4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=18, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18_cifar.py new file mode 100644 index 0000000000..7b9cf1e733 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet18_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=18, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34.py new file mode 100644 index 0000000000..100ee286be --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=34, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34_cifar.py new file mode 100644 index 0000000000..55d033bc30 --- /dev/null +++ 
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet34_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=34, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50.py new file mode 100644 index 0000000000..129a2bb50c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar.py new file mode 100644 index 0000000000..33b66d5264 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_cutmix.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_cutmix.py new file mode 100644 index 0000000000..73c38be271 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_cutmix.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict( + augments=dict(type='BatchCutMix', alpha=1.0, num_classes=10, + prob=1.0))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_mixup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_mixup.py new file mode 100644 index 0000000000..3de14f3f2a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cifar_mixup.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', 
loss_weight=1.0, use_soft=True)), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=1., num_classes=10, prob=1.))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cutmix.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cutmix.py new file mode 100644 index 0000000000..fb79088b79 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_cutmix.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict( + augments=dict( + type='BatchCutMix', alpha=1.0, num_classes=1000, prob=1.0))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_label_smooth.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_label_smooth.py new file mode 100644 index 0000000000..b6f7937519 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_label_smooth.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_mixup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_mixup.py new file mode 100644 index 0000000000..8ff9522605 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnet50_mixup.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d101.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d101.py new file mode 100644 index 0000000000..1e56223121 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d101.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1d', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d152.py 
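For illustration only (not part of the diff): the mixup and CutMix variants above pair a MultiLabelLinearClsHead and CrossEntropyLoss(use_soft=True) with a BatchMixup/BatchCutMix augment, because the augment produces soft, mixed labels instead of hard class indices. A rough sketch of what BatchMixup(alpha=0.2) does to a batch; this is an approximation, not the mmcls implementation:

    import numpy as np
    import torch
    import torch.nn.functional as F

    def batch_mixup(imgs, labels, alpha=0.2, num_classes=1000):
        lam = float(np.random.beta(alpha, alpha))   # mixing ratio
        perm = torch.randperm(imgs.size(0))         # pair each sample with another
        mixed_imgs = lam * imgs + (1 - lam) * imgs[perm]
        one_hot = F.one_hot(labels, num_classes).float()
        mixed_labels = lam * one_hot + (1 - lam) * one_hot[perm]
        return mixed_imgs, mixed_labels

    imgs = torch.randn(4, 3, 224, 224)
    labels = torch.randint(0, 1000, (4,))
    _, mixed_labels = batch_mixup(imgs, labels)
    print(mixed_labels.sum(dim=1))                  # each row still sums to 1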
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d152.py new file mode 100644 index 0000000000..58cc73beb3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d152.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1d', + depth=152, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d50.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d50.py new file mode 100644 index 0000000000..015aaa3d81 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnetv1d50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1d', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x4d.py new file mode 100644 index 0000000000..1c89fb6488 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x4d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=101, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x8d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x8d.py new file mode 100644 index 0000000000..2bb63f3aeb --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext101_32x8d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=101, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=8, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext152_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext152_32x4d.py new file mode 100644 index 0000000000..d392eff3dc --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext152_32x4d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=152, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + 
style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext50_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext50_32x4d.py new file mode 100644 index 0000000000..060426231e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/resnext50_32x4d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=50, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet101.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet101.py new file mode 100644 index 0000000000..137a6f90f6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet101.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNet', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet50.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet50.py new file mode 100644 index 0000000000..e5f6bfce8d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnet50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext101_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext101_32x4d.py new file mode 100644 index 0000000000..cc8a62c393 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext101_32x4d.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNeXt', + depth=101, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + se_ratio=16, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext50_32x4d.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext50_32x4d.py new file mode 100644 index 
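For illustration only (not part of the diff): for the ResNeXt and SE-ResNeXt backbones above, groups=32 with width_per_group=4 makes the grouped 3x3 convolution inside each bottleneck 32 * 4 = 128 channels wide in the first stage (the width doubles per stage, and the head still receives 2048 channels), while se_ratio=16 in the SE variants inserts a squeeze-and-excitation block that shrinks the channel descriptor by a factor of 16 before re-weighting the features. A standalone sketch of just the grouped convolution:

    import torch
    import torch.nn as nn

    groups, width_per_group = 32, 4
    width = groups * width_per_group                 # 128 channels in stage 1
    conv3x3 = nn.Conv2d(width, width, kernel_size=3, padding=1,
                        groups=groups, bias=False)   # 32 groups of 4 channels each
    x = torch.randn(2, width, 56, 56)
    print(conv3x3(x).shape)                          # torch.Size([2, 128, 56, 56])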
0000000000..0cdf7cb696 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/seresnext50_32x4d.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNeXt', + depth=50, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + se_ratio=16, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v1_1x.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v1_1x.py new file mode 100644 index 0000000000..f0f9d1fbdd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v1_1x.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ShuffleNetV1', groups=3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=960, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v2_1x.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v2_1x.py new file mode 100644 index 0000000000..190800e343 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/shufflenet_v2_1x.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ShuffleNetV2', widen_factor=1.0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_224.py new file mode 100644 index 0000000000..e16b4e6099 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_224.py @@ -0,0 +1,22 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='base', img_size=224, drop_path_rate=0.5), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_384.py new file mode 100644 index 0000000000..ce78981fb0 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/base_384.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', + arch='base', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_224.py new file mode 100644 index 0000000000..747d00e44d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_224.py @@ -0,0 +1,12 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict(type='SwinTransformer', arch='large', img_size=224), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_384.py new file mode 100644 index 0000000000..7026f81a31 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/large_384.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', + arch='large', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/small_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/small_224.py new file mode 100644 index 0000000000..78739866f9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/small_224.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='small', img_size=224, + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/tiny_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/tiny_224.py new file mode 100644 index 0000000000..2d68d66b50 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/swin_transformer/tiny_224.py @@ -0,0 +1,22 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='tiny', img_size=224, drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ])) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-14.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-14.py new file mode 100644 index 0000000000..91dbb67621 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-14.py @@ -0,0 +1,41 @@ +# model settings +embed_dims = 384 +num_classes = 1000 + +model = dict( + type='ImageClassifier', + backbone=dict( + type='T2T_ViT', + img_size=224, + in_channels=3, + embed_dims=embed_dims, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=14, + layer_cfgs=dict( + num_heads=6, + feedforward_channels=3 * embed_dims, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=num_classes, + in_channels=embed_dims, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)), + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes), + dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes), + ])) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-19.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-19.py new file mode 100644 index 0000000000..8ab139d679 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-19.py @@ -0,0 +1,41 @@ +# model settings +embed_dims = 448 +num_classes = 1000 + +model = dict( + type='ImageClassifier', + backbone=dict( + type='T2T_ViT', + img_size=224, + in_channels=3, + embed_dims=embed_dims, + t2t_cfg=dict( + token_dims=64, + 
use_performer=False, + ), + num_layers=19, + layer_cfgs=dict( + num_heads=7, + feedforward_channels=3 * embed_dims, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=num_classes, + in_channels=embed_dims, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)), + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes), + dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes), + ])) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-24.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-24.py new file mode 100644 index 0000000000..5990960ab4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/t2t-vit-t-24.py @@ -0,0 +1,41 @@ +# model settings +embed_dims = 512 +num_classes = 1000 + +model = dict( + type='ImageClassifier', + backbone=dict( + type='T2T_ViT', + img_size=224, + in_channels=3, + embed_dims=embed_dims, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=24, + layer_cfgs=dict( + num_heads=8, + feedforward_channels=3 * embed_dims, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=num_classes, + in_channels=embed_dims, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)), + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, prob=0.5, num_classes=num_classes), + dict(type='BatchCutMix', alpha=1.0, prob=0.5, num_classes=num_classes), + ])) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/tnt_s_patch16_224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/tnt_s_patch16_224.py new file mode 100644 index 0000000000..5e13d07828 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/tnt_s_patch16_224.py @@ -0,0 +1,29 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='TNT', + arch='s', + img_size=224, + patch_size=16, + in_channels=3, + ffn_ratio=4, + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + first_stride=4, + num_fcs=2, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ]), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11.py new file mode 100644 index 0000000000..2b6ee1426a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=11, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11bn.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11bn.py new file mode 100644 index 0000000000..cb4c64e95a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg11bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=11, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13.py new file mode 100644 index 0000000000..a9389100a6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=13, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13bn.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13bn.py new file mode 100644 index 0000000000..b12173b51b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg13bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=13, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16.py new file mode 100644 index 0000000000..93ce864fac --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=16, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16bn.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16bn.py new file mode 100644 index 
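Editor's note: several of the transformer model fragments above (the Swin and T2T-ViT ones) train with batch-level Mixup and CutMix through `train_cfg=dict(augments=[...])`. As a rough sketch of what `BatchMixup` with `alpha=0.8` does to a batch; this is the standard mixup recipe, not mmcls's actual implementation, and as I read the configs the `prob` values weight how often each augment is chosen for a given batch (CutMix works analogously but pastes a rectangular patch instead of blending whole images):

```python
import torch

def batch_mixup(imgs, one_hot_labels, alpha=0.8):
    # Standard batch-level mixup: blend each sample with a randomly permuted
    # partner using a Beta(alpha, alpha) mixing coefficient, and mix the
    # one-hot labels with the same coefficient.
    lam = torch.distributions.Beta(alpha, alpha).sample()
    index = torch.randperm(imgs.size(0))
    mixed_imgs = lam * imgs + (1 - lam) * imgs[index]
    mixed_labels = lam * one_hot_labels + (1 - lam) * one_hot_labels[index]
    return mixed_imgs, mixed_labels

# Example with a fake batch of 8 images and 1000 classes.
imgs = torch.randn(8, 3, 224, 224)
labels = torch.nn.functional.one_hot(torch.randint(0, 1000, (8,)), 1000).float()
mixed_imgs, mixed_labels = batch_mixup(imgs, labels)
```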
0000000000..765e34f636 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg16bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=16, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19.py new file mode 100644 index 0000000000..6f4ab061b2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=19, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19bn.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19bn.py new file mode 100644 index 0000000000..c468b5dea2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vgg19bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=19, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p16.py new file mode 100644 index 0000000000..bb42bed5fa --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p16.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='b', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, + mode='classy_vision'), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p32.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p32.py new file mode 100644 index 0000000000..ad550ef9b9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-base-p32.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='b', + img_size=224, + patch_size=32, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p16.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p16.py new file mode 
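Editor's note: the model files above are `_base_` fragments; they are not trained on their own but pulled into a full config and selectively overridden, as the MobileNet V2 and RegNet configs later in this patch do. A hypothetical example of reusing the VGG-16 fragment for a 10-class dataset (the file name and class count are illustrative only); note that VGG keeps its classifier inside the backbone, so `num_classes` is overridden on the backbone rather than on the head:

```python
# my_vgg16_10cls.py -- illustrative downstream config, not part of this patch.
_base_ = [
    '../_base_/models/vgg16.py',
    '../_base_/datasets/imagenet_bs32_pil_resize.py',
    '../_base_/schedules/imagenet_bs256.py',
    '../_base_/default_runtime.py',
]

# Base configs are merged recursively, so only the changed keys need to appear.
model = dict(backbone=dict(num_classes=10))
```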
100644 index 0000000000..9716230456 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p16.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='l', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p32.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p32.py new file mode 100644 index 0000000000..f9491bb561 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/models/vit-large-p32.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='l', + img_size=224, + patch_size=32, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/cifar10_bs128.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/cifar10_bs128.py new file mode 100644 index 0000000000..f134dbce3b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/cifar10_bs128.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[100, 150]) +runner = dict(type='EpochBasedRunner', max_epochs=200) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py new file mode 100644 index 0000000000..1a523e44dd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py @@ -0,0 +1,30 @@ +paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + }) + +# for batch in each gpu is 128, 8 gpu +# lr = 5e-4 * 128 * 8 / 512 = 0.001 +optimizer = dict( + type='AdamW', + lr=5e-4 * 128 * 8 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999), + paramwise_cfg=paramwise_cfg) +optimizer_config = dict(grad_clip=dict(max_norm=5.0)) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + by_epoch=False, + min_lr_ratio=1e-2, + warmup='linear', + warmup_ratio=1e-3, + warmup_iters=20 * 1252, + warmup_by_epoch=False) + +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_coslr.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_coslr.py new file mode 100644 index 0000000000..ee84e7a6f6 --- 
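Editor's note: the Swin AdamW schedule above bakes two batch-size-dependent quantities into literals: the learning rate follows the linear scaling rule against a 512-image reference batch, and `warmup_iters=20 * 1252` is 20 epochs expressed in iterations because `warmup_by_epoch=False`. A quick check of both numbers; the ImageNet-1k training-set size used below is the usual published figure, an assumption not stated anywhere in this patch:

```python
import math

# Linear LR scaling: the 5e-4 base rate is defined for a global batch of 512.
base_lr, ref_batch = 5e-4, 512
global_batch = 128 * 8                      # samples_per_gpu * number of GPUs
print(base_lr * global_batch / ref_batch)   # 0.001, as in the config comment

# Warmup length: 20 epochs converted to iterations.
imagenet1k_train_images = 1_281_167         # assumed standard ImageNet-1k size
iters_per_epoch = math.ceil(imagenet1k_train_images / global_batch)
print(iters_per_epoch)                      # 1252
print(20 * iters_per_epoch)                 # 25040, i.e. the config's 20 * 1252
```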
/dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_coslr.py @@ -0,0 +1,12 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.8, momentum=0.9, weight_decay=5e-5) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=0.1, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py new file mode 100644 index 0000000000..99fbdda9f5 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py @@ -0,0 +1,17 @@ +# optimizer +optimizer = dict( + type='SGD', + lr=0.5, + momentum=0.9, + weight_decay=0.00004, + paramwise_cfg=dict(norm_decay_mult=0)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='poly', + min_lr=0, + by_epoch=False, + warmup='constant', + warmup_iters=5000, +) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048.py new file mode 100644 index 0000000000..93fdebfdd1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048.py @@ -0,0 +1,12 @@ +# optimizer +optimizer = dict( + type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=2500, + warmup_ratio=0.25, + step=[30, 60, 90]) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_AdamW.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_AdamW.py new file mode 100644 index 0000000000..6d4f2081b9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_AdamW.py @@ -0,0 +1,20 @@ +# optimizer +# In ClassyVision, the lr is set to 0.003 for bs4096. 
+# In this implementation(bs2048), lr = 0.003 / 4096 * (32bs * 64gpus) = 0.0015 +optimizer = dict(type='AdamW', lr=0.0015, weight_decay=0.3) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# specific to vit pretrain +paramwise_cfg = dict( + custom_keys={ + '.backbone.cls_token': dict(decay_mult=0.0), + '.backbone.pos_embed': dict(decay_mult=0.0) + }) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=10000, + warmup_ratio=1e-4) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_coslr.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_coslr.py new file mode 100644 index 0000000000..b9e77f2c6a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs2048_coslr.py @@ -0,0 +1,12 @@ +# optimizer +optimizer = dict( + type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=2500, + warmup_ratio=0.25) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256.py new file mode 100644 index 0000000000..3b5d19847a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[30, 60, 90]) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_140e.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_140e.py new file mode 100644 index 0000000000..caba1577c7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_140e.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[40, 80, 120]) +runner = dict(type='EpochBasedRunner', max_epochs=140) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py new file mode 100644 index 0000000000..49456b2cd0 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py @@ -0,0 +1,11 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=25025, + warmup_ratio=0.25) +runner = dict(type='EpochBasedRunner', max_epochs=200) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_coslr.py 
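Editor's note: the AdamW schedules above mark the ViT `cls_token` and `pos_embed` (and, in the Swin schedule, all norm and bias parameters) for zero weight decay through `paramwise_cfg` entries with `decay_mult=0.0`. A rough plain-PyTorch equivalent of that idea, shown only to illustrate the effect, not mmcls's optimizer constructor:

```python
import torch

def no_decay_param_groups(model, weight_decay=0.3,
                          no_decay_keys=('cls_token', 'pos_embed')):
    # Parameters whose names contain one of the keys get weight_decay=0,
    # mirroring what decay_mult=0.0 in paramwise_cfg is meant to achieve.
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        (no_decay if any(k in name for k in no_decay_keys) else decay).append(param)
    return [
        {'params': decay, 'weight_decay': weight_decay},
        {'params': no_decay, 'weight_decay': 0.0},
    ]

# e.g. torch.optim.AdamW(no_decay_param_groups(vit_model), lr=0.0015),
# where vit_model is any nn.Module (name used here for illustration only).
```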
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_coslr.py new file mode 100644 index 0000000000..779b4792ed --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_coslr.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +runner = dict(type='EpochBasedRunner', max_epochs=100) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_epochstep.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_epochstep.py new file mode 100644 index 0000000000..2347a04354 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs256_epochstep.py @@ -0,0 +1,6 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=0.00004) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', gamma=0.98, step=1) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs4096_AdamW.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs4096_AdamW.py new file mode 100644 index 0000000000..859cf4b23a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/_base_/schedules/imagenet_bs4096_AdamW.py @@ -0,0 +1,18 @@ +# optimizer +optimizer = dict(type='AdamW', lr=0.003, weight_decay=0.3) +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) + +# specific to vit pretrain +paramwise_cfg = dict( + custom_keys={ + '.backbone.cls_token': dict(decay_mult=0.0), + '.backbone.pos_embed': dict(decay_mult=0.0) + }) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=10000, + warmup_ratio=1e-4) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/README.md new file mode 100644 index 0000000000..2ef4ea13db --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/README.md @@ -0,0 +1,20 @@ +# Mixed Precision Training + +## Introduction + + + +```latex +@article{micikevicius2017mixed, + title={Mixed precision training}, + author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, + journal={arXiv preprint arXiv:1710.03740}, + year={2017} +} +``` + +## Results and models + +| Model | Params(M) | Flops(G) | Mem (GB) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:| :---------:|:--------:| +| ResNet-50 | 25.56 | 4.12 | 1.9 |76.30 | 93.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/fp16/resnet50_b32x8_fp16_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth) | 
[log](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.log.json) | diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/metafile.yml new file mode 100644 index 0000000000..20b42840d5 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: FP16 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + Training Resources: 8x V100 GPUs + Paper: + URL: https://arxiv.org/abs/1710.03740 + Title: Mixed Precision Training + README: configs/fp16/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/a41cb2fa938d957101cc446e271486206188bf5b/mmcls/core/fp16/hooks.py#L13 + Version: v0.15.0 + +Models: + - Name: resnet50_b32x8_fp16_dynamic_imagenet + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + Epochs: 100 + Batch Size: 256 + Architecture: + - ResNet + In Collection: FP16 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.30 + Top 5 Accuracy: 93.07 + Weights: https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth + Config: configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py new file mode 100644 index 0000000000..35b4ff5423 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_dynamic_imagenet.py @@ -0,0 +1,4 @@ +_base_ = ['../resnet/resnet50_b32x8_imagenet.py'] + +# fp16 settings +fp16 = dict(loss_scale='dynamic') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_imagenet.py new file mode 100644 index 0000000000..fbab0cc1ec --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/fp16/resnet50_b32x8_fp16_imagenet.py @@ -0,0 +1,4 @@ +_base_ = ['../resnet/resnet50_b32x8_imagenet.py'] + +# fp16 settings +fp16 = dict(loss_scale=512.) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/README.md new file mode 100644 index 0000000000..49647ce4a1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/README.md @@ -0,0 +1,19 @@ +# Backpropagation Applied to Handwritten Zip Code Recognition + + +## Introduction + + + +```latex +@ARTICLE{6795724, + author={Y. {LeCun} and B. {Boser} and J. S. {Denker} and D. {Henderson} and R. E. {Howard} and W. {Hubbard} and L. D. 
{Jackel}}, + journal={Neural Computation}, + title={Backpropagation Applied to Handwritten Zip Code Recognition}, + year={1989}, + volume={1}, + number={4}, + pages={541-551}, + doi={10.1162/neco.1989.1.4.541}} +} +``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/lenet5_mnist.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/lenet5_mnist.py new file mode 100644 index 0000000000..7286b798ff --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/lenet/lenet5_mnist.py @@ -0,0 +1,59 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='LeNet5', num_classes=10), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) +# dataset settings +dataset_type = 'MNIST' +img_norm_cfg = dict(mean=[33.46], std=[78.87], to_rgb=True) +train_pipeline = [ + dict(type='Resize', size=32), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=32), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + samples_per_gpu=128, + workers_per_gpu=2, + train=dict( + type=dataset_type, data_prefix='data/mnist', pipeline=train_pipeline), + val=dict( + type=dataset_type, data_prefix='data/mnist', pipeline=test_pipeline), + test=dict( + type=dataset_type, data_prefix='data/mnist', pipeline=test_pipeline)) +evaluation = dict( + interval=5, metric='accuracy', metric_options={'topk': (1, )}) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[15]) +# checkpoint saving +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=150, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=5) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/mnist/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/README.md new file mode 100644 index 0000000000..75008d3cad --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/README.md @@ -0,0 +1,27 @@ +# MobileNetV2: Inverted Residuals and Linear Bottlenecks + + +## Introduction + + + +```latex +@INPROCEEDINGS{8578572, + author={M. {Sandler} and A. {Howard} and M. {Zhu} and A. {Zhmoginov} and L. 
{Chen}}, + booktitle={2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + title={MobileNetV2: Inverted Residuals and Linear Bottlenecks}, + year={2018}, + volume={}, + number={}, + pages={4510-4520}, + doi={10.1109/CVPR.2018.00474}} +} +``` + +## Results and models + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| MobileNet V2 | 3.5 | 0.319 | 71.86 | 90.42 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth) | [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.log.json) | diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/metafile.yml new file mode 100644 index 0000000000..3765f0ca85 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/metafile.yml @@ -0,0 +1,34 @@ +Collections: + - Name: MobileNet V2 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 300 + Batch Size: 256 + Architecture: + - MobileNet V2 + Paper: + URL: https://arxiv.org/abs/1801.04381 + Title: "MobileNetV2: Inverted Residuals and Linear Bottlenecks" + README: configs/mobilenet_v2/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/mobilenet_v2.py#L101 + Version: v0.15.0 + +Models: + - Name: mobilenet_v2_b32x8_imagenet + Metadata: + FLOPs: 319000000 + Parameters: 3500000 + In Collection: MobileNet V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.86 + Top 5 Accuracy: 90.42 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth + Config: configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py new file mode 100644 index 0000000000..afd2d9795a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/mobilenet_v2_1x.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_epochstep.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/README.md new file mode 100644 index 0000000000..2bb95508da --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/README.md @@ -0,0 +1,31 @@ +# Searching for MobileNetV3 + + +## Introduction + + + +```latex +@inproceedings{Howard_2019_ICCV, + author = {Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and Le, Quoc V. 
and Adam, Hartwig}, + title = {Searching for MobileNetV3}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` + +## Pretrain model + +The pre-trained modles are converted from [torchvision](https://pytorch.org/vision/stable/_modules/torchvision/models/mobilenetv3.html). + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:--------:| +| MobileNetV3-Large | 5.48 | 0.23 | 74.04 | 91.34 | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth)| +| MobileNetV3-Small | 2.54 | 0.06 | 67.66 | 87.41 | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth)| + +## Results and models + +Waiting for adding. diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/metafile.yml new file mode 100644 index 0000000000..c978fd8f42 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/metafile.yml @@ -0,0 +1,42 @@ +Collections: + - Name: MobileNet V3 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - RMSprop with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 600 + Batch Size: 1024 + Architecture: + - MobileNet V3 + Paper: https://arxiv.org/abs/1905.02244 + README: configs/mobilenet_v3/README.md + +Models: + - Name: mobilenet_v3_small_imagenet + Metadata: + FLOPs: 60000000 + Parameters: 2540000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 67.66 + Top 5 Accuracy: 87.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth + Config: configs/mobilenet_v3/mobilenet_v3_small_imagenet.py + - Name: mobilenet_v3_large_imagenet + Metadata: + FLOPs: 230000000 + Parameters: 5480000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.04 + Top 5 Accuracy: 91.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth + Config: configs/mobilenet_v3/mobilenet_v3_large_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py new file mode 100644 index 0000000000..985ef520d5 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_large_imagenet.py @@ -0,0 +1,158 @@ +# Refer to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification +# ---------------------------- +# -[x] auto_augment='imagenet' +# -[x] batch_size=128 (per gpu) +# -[x] epochs=600 +# -[x] opt='rmsprop' +# -[x] lr=0.064 +# -[x] eps=0.0316 +# -[x] alpha=0.9 +# -[x] weight_decay=1e-05 +# -[x] momentum=0.9 +# -[x] lr_gamma=0.973 +# -[x] lr_step_size=2 +# -[x] nproc_per_node=8 +# -[x] random_erase=0.2 +# -[x] workers=16 (workers_per_gpu) +# - modify: RandomErasing use RE-M instead of RE-0 + +_base_ = [ + '../_base_/models/mobilenet_v3_large_imagenet.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + 
'../_base_/default_runtime.py' +] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +policies = [ + [ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], + [ + dict(type='Posterize', bits=5, prob=0.6), + dict(type='Posterize', bits=5, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8) + ], + [ + dict(type='Solarize', thr=256 / 9 * 6, prob=0.6), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Posterize', bits=6, prob=0.8), + dict(type='Equalize', prob=1.)], + [ + dict(type='Rotate', angle=10., prob=0.2), + dict(type='Solarize', thr=256 / 9, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.6), + dict(type='Posterize', bits=5, prob=0.4) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0., prob=0.4) + ], + [ + dict(type='Rotate', angle=30., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Equalize', prob=0.0), + dict(type='Equalize', prob=0.8)], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0.2, prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0.8, prob=0.8), + dict(type='Solarize', thr=256 / 9 * 2, prob=0.8) + ], + [ + dict(type='Sharpness', magnitude=0.7, prob=0.4), + dict(type='Invert', prob=0.6) + ], + [ + dict( + type='Shear', + magnitude=0.3 / 9 * 5, + prob=0.6, + direction='horizontal'), + dict(type='Equalize', prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) 
+ ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies=policies), + dict( + type='RandomErasing', + erase_prob=0.2, + mode='const', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean']), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +data = dict( + samples_per_gpu=128, + workers_per_gpu=4, + train=dict(pipeline=train_pipeline)) +evaluation = dict(interval=10, metric='accuracy') + +# optimizer +optimizer = dict( + type='RMSprop', + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=2, gamma=0.973, by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=600) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_cifar.py new file mode 100644 index 0000000000..2b5c2b1f07 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_cifar.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/mobilenet_v3_small_cifar.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] + +lr_config = dict(policy='step', step=[120, 170]) +runner = dict(type='EpochBasedRunner', max_epochs=200) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py new file mode 100644 index 0000000000..2612166fd2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/mobilenet_v3/mobilenet_v3_small_imagenet.py @@ -0,0 +1,158 @@ +# Refer to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification +# ---------------------------- +# -[x] auto_augment='imagenet' +# -[x] batch_size=128 (per gpu) +# -[x] epochs=600 +# -[x] opt='rmsprop' +# -[x] lr=0.064 +# -[x] eps=0.0316 +# -[x] alpha=0.9 +# -[x] weight_decay=1e-05 +# -[x] momentum=0.9 +# -[x] lr_gamma=0.973 +# -[x] lr_step_size=2 +# -[x] nproc_per_node=8 +# -[x] random_erase=0.2 +# -[x] workers=16 (workers_per_gpu) +# - modify: RandomErasing use RE-M instead of RE-0 + +_base_ = [ + '../_base_/models/mobilenet_v3_small_imagenet.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/default_runtime.py' +] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +policies = [ + [ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], + [ + dict(type='Posterize', bits=5, prob=0.6), + dict(type='Posterize', bits=5, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Rotate', angle=30 / 9 * 8, 
prob=0.8) + ], + [ + dict(type='Solarize', thr=256 / 9 * 6, prob=0.6), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Posterize', bits=6, prob=0.8), + dict(type='Equalize', prob=1.)], + [ + dict(type='Rotate', angle=10., prob=0.2), + dict(type='Solarize', thr=256 / 9, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.6), + dict(type='Posterize', bits=5, prob=0.4) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0., prob=0.4) + ], + [ + dict(type='Rotate', angle=30., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Equalize', prob=0.0), + dict(type='Equalize', prob=0.8)], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0.2, prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0.8, prob=0.8), + dict(type='Solarize', thr=256 / 9 * 2, prob=0.8) + ], + [ + dict(type='Sharpness', magnitude=0.7, prob=0.4), + dict(type='Invert', prob=0.6) + ], + [ + dict( + type='Shear', + magnitude=0.3 / 9 * 5, + prob=0.6, + direction='horizontal'), + dict(type='Equalize', prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) + ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies=policies), + dict( + type='RandomErasing', + erase_prob=0.2, + mode='const', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean']), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +data = dict( + samples_per_gpu=128, + workers_per_gpu=4, + train=dict(pipeline=train_pipeline)) +evaluation = dict(interval=10, metric='accuracy') + +# optimizer +optimizer = dict( + type='RMSprop', + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=2, gamma=0.973, by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=600) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/README.md new file mode 100644 index 0000000000..10c42d4289 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/README.md @@ -0,0 +1,38 @@ +# Designing Network Design Spaces + + +## Introduction + + + +```latex +@article{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} 
+``` + +## Pretrain model + +The pre-trained modles are converted from [model zoo of pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:--------:| +| RegNetX-400MF | 5.16 | 0.41 | 72.55 | 90.91 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-400MF-0db9f35c.pth)| +| RegNetX-800MF | 7.26 | 0.81 | 75.21 | 92.37 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-800MF-4f9d1e8a.pth)| +| RegNetX-1.6GF | 9.19 | 1.63 | 77.04 | 93.51 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-1.6GF-cfb32375.pth)| +| RegNetX-3.2GF | 15.3 | 3.21 | 78.26 | 94.20 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-3.2GF-82c43fd5.pth)| +| RegNetX-4.0GF | 22.12 | 4.0 | 78.72 | 94.22 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-4.0GF-ef8bb32c.pth)| +| RegNetX-6.4GF | 26.21 | 6.51 | 79.22 | 94.61 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-6.4GF-6888c0ea.pth)| +| RegNetX-8.0GF | 39.57 | 8.03 | 79.31 | 94.57 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-8.0GF-cb4c77ec.pth)| +| RegNetX-12GF | 46.11 | 12.15 | 79.91 | 94.78 | [model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-12GF-0574538f.pth)| + +## Results and models + +Waiting for adding. diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_1.6gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_1.6gf_b32x8_imagenet.py new file mode 100644 index 0000000000..cfa956ff78 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_1.6gf_b32x8_imagenet.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_1.6gf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ImageNet' + +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff 
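Editor's note: the RegNet configs keep the decoded image in BGR channel order (`to_rgb=False`) and normalize with the statistics used by pycls; these are simply the familiar ImageNet RGB mean/std (used, for example, by the MobileNet V3 configs earlier in this patch) with the channels reversed. A small sanity check:

```python
# ImageNet normalization constants in RGB order, as used elsewhere in this patch.
rgb_mean = [123.675, 116.28, 103.53]
rgb_std = [58.395, 57.12, 57.375]

# The RegNet configs skip the BGR->RGB conversion (to_rgb=False), so their
# mean/std lists are the same numbers in reverse (BGR) channel order.
assert rgb_mean[::-1] == [103.53, 116.28, 123.675]
assert rgb_std[::-1] == [57.375, 57.12, 58.395]
```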
--git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_12gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_12gf_b32x8_imagenet.py new file mode 100644 index 0000000000..17796a4b78 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_12gf_b32x8_imagenet.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_12gf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ImageNet' + +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_3.2gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_3.2gf_b32x8_imagenet.py new file mode 100644 index 0000000000..b772c78604 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_3.2gf_b32x8_imagenet.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_3.2gf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ImageNet' + +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + 
data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_4.0gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_4.0gf_b32x8_imagenet.py new file mode 100644 index 0000000000..98e6c53b88 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_4.0gf_b32x8_imagenet.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_4.0gf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ImageNet' + +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_400mf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_400mf_b32x8_imagenet.py new file mode 100644 index 0000000000..88ccec943d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_400mf_b32x8_imagenet.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_400mf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ImageNet' + +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + 
dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_6.4gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_6.4gf_b32x8_imagenet.py new file mode 100644 index 0000000000..4e5e36a07d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_6.4gf_b32x8_imagenet.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_6.4gf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ImageNet' + +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_8.0gf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_8.0gf_b32x8_imagenet.py new file mode 100644 index 0000000000..37d7c8fbfb --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_8.0gf_b32x8_imagenet.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_8.0gf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ImageNet' + +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', 
size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_800mf_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_800mf_b32x8_imagenet.py new file mode 100644 index 0000000000..3db65b36ef --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/regnet/regnetx_800mf_b32x8_imagenet.py @@ -0,0 +1,51 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_800mf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ImageNet' + +img_norm_cfg = dict( + # The mean and std are used in PyCls when training RegNets + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/README.md new file mode 100644 index 0000000000..20e8c35b3f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/README.md @@ -0,0 +1,51 @@ +# Repvgg: Making vgg-style convnets great again + + +## Introduction + + + +```latex +@inproceedings{ding2021repvgg, + title={Repvgg: Making vgg-style convnets great again}, + author={Ding, Xiaohan 
and Zhang, Xiangyu and Ma, Ningning and Han, Jungong and Ding, Guiguang and Sun, Jian}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={13733--13742}, + year={2021} +} +``` + +## Pretrain model + +| Model | Epochs | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------: | :----: | :-------------------------------: | :-----------------------------: | :-------: | :-------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| RepVGG-A0\* | 120 | 9.11(train) \| 8.31 (deploy) | 1.52 (train) \| 1.36 (deploy) | 72.41 | 90.50 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth) | +| RepVGG-A1\* | 120 | 14.09 (train) \| 12.79 (deploy) | 2.64 (train) \| 2.37 (deploy) | 74.47 | 91.85 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py) \| [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth) | +| RepVGG-A2\* | 120 | 28.21 (train) \| 25.5 (deploy) | 5.7 (train) \| 5.12 (deploy) | 76.48 | 93.01 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth) | +| RepVGG-B0\* | 120 | 15.82 (train) \| 14.34 (deploy) | 3.42 (train) \| 3.06 (deploy) | 75.14 | 92.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth) | +| RepVGG-B1\* | 120 | 57.42 (train) \| 51.83 (deploy) | 13.16 (train) \| 11.82 (deploy) | 78.37 | 94.11 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth) | +| RepVGG-B1g2\* | 120 | 45.78 (train) \| 41.36 (deploy) | 9.82 (train) \| 8.82 (deploy) | 77.79 | 93.88 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth) | +| RepVGG-B1g4\* | 120 | 39.97 (train) \| 36.13 (deploy) | 8.15 (train) \| 7.32 (deploy) | 77.58 | 93.84 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth) | +| RepVGG-B2\* | 120 | 89.02 (train) \| 80.32 (deploy) | 20.46 (train) \| 18.39 (deploy) | 78.78 | 94.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth) | +| RepVGG-B2g4\* | 200 | 61.76 (train) \| 55.78 (deploy) | 12.63 (train) \| 11.34 (deploy) | 79.38 | 94.68 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth) | +| RepVGG-B3\* | 200 | 123.09 (train) \| 110.96 (deploy) | 29.17 (train) \| 26.22 (deploy) | 80.52 | 95.26 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth) | +| RepVGG-B3g4\* | 200 | 83.83 (train) \| 75.63 (deploy) | 17.9 (train) \| 16.08 (deploy) | 80.22 | 95.10 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth) | +| RepVGG-D2se\* | 200 | 133.33 (train) \| 120.39 (deploy) | 36.56 (train) \| 32.85 (deploy) | 81.81 | 95.94 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) \|[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth) | + +*Models with \* are converted from other repos.* + +## Reparameterize RepVGG + +The checkpoints 
provided are all in `train` form. Use the reparameterization tool to convert them to the more efficient `deploy` form, which not only has fewer parameters but also requires less computation. + +```bash +python ./tools/convert_models/reparameterize_repvgg.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH} +``` + +`${CFG_PATH}` is the config file, `${SRC_CKPT_PATH}` is the source checkpoint file, and `${TARGET_CKPT_PATH}` is the path of the target deploy weight file. + +To use a reparameterized RepVGG weight, the config file must be switched to [the deploy config files](./deploy), as below: + +```bash +python ./tools/test.py ${RepVGG_Deploy_CFG} ${CHECK_POINT} +``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..20787f286d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..eea0da9c58 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-A1_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..7b0cea7b7d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-A2_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..23a2898ac5 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..24355edac7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B1_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True))
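The deploy configs above only set `deploy=True` on the backbone; the weight conversion itself rests on the fact that each training-time block's parallel 3x3, 1x1 and identity branches can be folded into a single 3x3 convolution. The snippet below is a minimal PyTorch sketch of that fusion, with BatchNorm folding omitted for brevity, so it illustrates the idea rather than reproducing the `reparameterize_repvgg.py` script:

```python
# Minimal sketch of RepVGG-style branch fusion (BatchNorm folding omitted).
import torch
import torch.nn as nn
import torch.nn.functional as F

# One training-time block: parallel 3x3, 1x1 and identity branches.
in_ch = out_ch = 8
x = torch.randn(1, in_ch, 14, 14)
conv3 = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)
conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=1)
y_train = conv3(x) + conv1(x) + x

# Deploy form: merge all three branches into a single 3x3 convolution.
with torch.no_grad():
    w = conv3.weight + F.pad(conv1.weight, [1, 1, 1, 1])  # 1x1 kernel zero-padded to 3x3
    for c in range(in_ch):
        w[c, c, 1, 1] += 1.0                              # identity branch as a centred 3x3 kernel
    b = conv3.bias + conv1.bias

    deploy = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)
    deploy.weight.copy_(w)
    deploy.bias.copy_(b)

# The fused conv computes the same function as the three-branch block.
print(torch.allclose(y_train, deploy(x), atol=1e-5))  # True
```

diff --git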
a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..579fcc47b9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B1g2_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..eab5d44037 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B1g4_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..0681f14dc3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B2_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000000..8f1840145f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000000..e60b0678a9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000000..46f187789a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = 
dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000000..66dff3b6d4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/metafile.yml new file mode 100644 index 0000000000..fc3d8ab355 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/metafile.yml @@ -0,0 +1,208 @@ +Collections: + - Name: RepVGG + Metadata: + Training Data: ImageNet-1k + Architecture: + - re-parameterization Convolution + - VGG-style Neural Network + Paper: + URL: https://arxiv.org/abs/2101.03697 + Title: 'RepVGG: Making VGG-style ConvNets Great Again' + README: configs/repvgg/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.16.0/mmcls/models/backbones/repvgg.py#L257 + Version: v0.16.0 + +Models: + - Name: repvgg-A0_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 1520000000 + Parameters: 9110000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 72.41 + Top 5 Accuracy: 90.50 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L196 + - Name: repvgg-A1_4xb64-coslr-120e_in1k + In Collection: Repvgg + Config: configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 2640000000 + Parameters: 14090000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 74.47 + Top 5 Accuracy: 91.85 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L200 + - Name: repvgg-A2_4xb64-coslr-120e_in1k + In Collection: Repvgg + Config: configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 28210000000 + Parameters: 5700000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 76.48 + Top 5 Accuracy: 93.01 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L204 + - Name: repvgg-B0_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 
15820000000 + Parameters: 3420000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 75.14 + Top 5 Accuracy: 92.42 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L208 + - Name: repvgg-B1_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 57420000000 + Parameters: 13160000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.37 + Top 5 Accuracy: 94.11 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L212 + - Name: repvgg-B1g2_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 45780000000 + Parameters: 9820000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 77.79 + Top 5 Accuracy: 93.88 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L216 + - Name: repvgg-B1g4_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 39970000000 + Parameters: 8150000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 77.58 + Top 5 Accuracy: 93.84 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L220 + - Name: repvgg-B2_4xb64-coslr-120e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py + Metadata: + FLOPs: 89020000000 + Parameters: 20420000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.78 + Top 5 Accuracy: 94.42 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L225 + - Name: repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py + Metadata: + FLOPs: 61760000000 + Parameters: 12630000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.38 + Top 5 Accuracy: 94.68 + Weights: 
https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L229 + - Name: repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py + Metadata: + FLOPs: 123090000000 + Parameters: 29170000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 80.52 + Top 5 Accuracy: 95.26 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L238 + - Name: repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py + Metadata: + FLOPs: 83830000000 + Parameters: 17900000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 80.22 + Top 5 Accuracy: 95.10 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L238 + - Name: repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py + Metadata: + FLOPs: 133330000000 + Parameters: 36560000 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 81.81 + Top 5 Accuracy: 95.94 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L250 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..a7fd3bbe91 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/repvgg-A0_in1k.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +runner = dict(max_epochs=120) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..649020f2c6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = 
'./repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='A1')) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..eedaf2d29b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='A2'), head=dict(in_channels=1408)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..b3ce7ea27d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B0'), head=dict(in_channels=1280)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..30adea3dc8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B1'), head=dict(in_channels=2048)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..2749db8d95 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B1g2'), head=dict(in_channels=2048)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..2647690975 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B1g4'), head=dict(in_channels=2048)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py new file mode 100644 index 0000000000..4d215567f4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_4xb64-coslr-120e_in1k.py' + +model = dict(backbone=dict(arch='B2'), head=dict(in_channels=2560)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py 
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000000..11331cf02f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(arch='B2g4')) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000000..7b6dc5065d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/repvgg-B3_lbs-mixup_in1k.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs256_200e_coslr_warmup.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000000..67e3688c5a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(arch='B3g4')) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py new file mode 100644 index 0000000000..d235610f07 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py' + +model = dict(backbone=dict(arch='D2se')) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/README.md new file mode 100644 index 0000000000..befe4ba6d6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/README.md @@ -0,0 +1,30 @@ +# Res2Net: A New Multi-scale Backbone Architecture + + +## Introduction + + + +```latex +@article{gao2019res2net, + title={Res2Net: A New Multi-scale Backbone Architecture}, + author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, + journal={IEEE TPAMI}, + year={2021}, + doi={10.1109/TPAMI.2019.2938758}, +} +``` + +## Pretrain model + +The pre-trained models are converted from [official repo](https://github.com/Res2Net/Res2Net-PretrainedModels). 
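For a quick sanity check of a converted checkpoint, the high-level inference API can be used roughly as follows. This is only a sketch: it assumes the `mmcls` package shipped with this repository is installed, and the checkpoint path is a hypothetical local file name for one of the downloaded weights listed below.

```python
# Sanity-check a converted Res2Net checkpoint with the mmcls inference API.
from mmcls.apis import inference_model, init_model

config = 'configs/res2net/res2net50-w26-s8_8xb32_in1k.py'
checkpoint = 'checkpoints/res2net50-w26-s8_3rdparty_8xb32_in1k.pth'  # example local path for the downloaded weight

model = init_model(config, checkpoint, device='cpu')  # build the model and load the converted weights
result = inference_model(model, 'demo/demo.JPEG')      # run single-image inference on the demo image
print(result['pred_class'], result['pred_score'])
```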
+ +### ImageNet 1k + +| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +|:---------------------:|:-----------:|:---------:|:---------:|:---------:|:---------:|:--------:| +| Res2Net-50-14w-8s\* | 224x224 | 25.06 | 4.22 | 78.14 | 93.85 | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth)| +| Res2Net-50-26w-8s\* | 224x224 | 48.40 | 8.39 | 79.20 | 94.36 | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth)| +| Res2Net-101-26w-4s\* | 224x224 | 45.21 | 8.12 | 79.19 | 94.44 | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth)| + +*Models with \* are converted from other repos.* diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/metafile.yml new file mode 100644 index 0000000000..dfcda7329f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/metafile.yml @@ -0,0 +1,67 @@ +Collections: + - Name: Res2Net + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - Batch Normalization + - Convolution + - Global Average Pooling + - ReLU + - Res2Net Block + Paper: + Title: 'Res2Net: A New Multi-scale Backbone Architecture' + URL: https://arxiv.org/pdf/1904.01169.pdf + README: configs/res2net/README.md + +Models: + - Name: res2net50-w14-s8_3rdparty_8xb32_in1k + Metadata: + FLOPs: 4220000000 + Parameters: 25060000 + In Collection: Res2Net + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.14 + Top 5 Accuracy: 93.85 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth + Converted From: + Weights: https://1drv.ms/u/s!AkxDDnOtroRPdOTqhF8ne_aakDI?e=EVb8Ri + Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L221 + Config: configs/res2net/res2net50-w14-s8_8xb32_in1k.py + - Name: res2net50-w26-s8_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8390000000 + Parameters: 48400000 + In Collection: Res2Net + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.20 + Top 5 Accuracy: 94.36 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth + Converted From: + Weights: https://1drv.ms/u/s!AkxDDnOtroRPdTrAd_Afzc26Z7Q?e=slYqsR + Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L201 + Config: configs/res2net/res2net50-w26-s8_8xb32_in1k.py + - Name: res2net101-w26-s4_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8120000000 + Parameters: 45210000 + In Collection: Res2Net + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.19 + Top 5 Accuracy: 94.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth + Converted From: + Weights: https://1drv.ms/u/s!AkxDDnOtroRPcJRgTLkahL0cFYw?e=nwbnic + Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L181 + Config: configs/res2net/res2net101-w26-s4_8xb32_in1k.py diff --git 
a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net101-w26-s4_8xb32_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net101-w26-s4_8xb32_in1k.py new file mode 100644 index 0000000000..7ebe9e94d6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net101-w26-s4_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/res2net101-w26-s4.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w14-s8_8xb32_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w14-s8_8xb32_in1k.py new file mode 100644 index 0000000000..56cc02e3b8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w14-s8_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/res2net50-w14-s8.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w26-s8_8xb32_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w26-s8_8xb32_in1k.py new file mode 100644 index 0000000000..d7dcbeb916 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/res2net/res2net50-w26-s8_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/res2net50-w26-s8.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/README.md new file mode 100644 index 0000000000..704d24a759 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/README.md @@ -0,0 +1,17 @@ +# ResNeSt: Split-Attention Networks + + +## Introduction + + + +```latex +@misc{zhang2020resnest, + title={ResNeSt: Split-Attention Networks}, + author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. 
Manmatha and Mu Li and Alexander Smola}, + year={2020}, + eprint={2004.08955}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest101_b64x32_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest101_b64x32_imagenet.py new file mode 100644 index 0000000000..27b1882cf7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest101_b64x32_imagenet.py @@ -0,0 +1,181 @@ +_base_ = ['../_base_/models/resnest101.py', '../_base_/default_runtime.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_lighting_cfg = dict( + eigval=[55.4625, 4.7940, 1.1475], + eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + alphastd=0.1, + to_rgb=True) +policies = [ + dict(type='AutoContrast', prob=0.5), + dict(type='Equalize', prob=0.5), + dict(type='Invert', prob=0.5), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 30), + pad_val=0, + prob=0.5, + random_negative_prob=0.5), + dict( + type='Posterize', + magnitude_key='bits', + magnitude_range=(0, 4), + prob=0.5), + dict( + type='Solarize', + magnitude_key='thr', + magnitude_range=(0, 256), + prob=0.5), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110), + thr=128, + prob=0.5), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Contrast', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Brightness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Sharpness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5), + dict( + type='Cutout', + magnitude_key='shape', + magnitude_range=(1, 41), + pad_val=0, + prob=0.5), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5, + interpolation='bicubic') +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12), + dict( + type='RandomResizedCrop', + size=256, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict(type='Lighting', **img_lighting_cfg), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=False), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=256, + 
efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') + +# optimizer +optimizer = dict( + type='SGD', + lr=0.8, + momentum=0.9, + weight_decay=1e-4, + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-6, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=270) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest200_b32x64_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest200_b32x64_imagenet.py new file mode 100644 index 0000000000..3b166a2d62 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest200_b32x64_imagenet.py @@ -0,0 +1,181 @@ +_base_ = ['../_base_/models/resnest200.py', '../_base_/default_runtime.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_lighting_cfg = dict( + eigval=[55.4625, 4.7940, 1.1475], + eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + alphastd=0.1, + to_rgb=True) +policies = [ + dict(type='AutoContrast', prob=0.5), + dict(type='Equalize', prob=0.5), + dict(type='Invert', prob=0.5), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 30), + pad_val=0, + prob=0.5, + random_negative_prob=0.5), + dict( + type='Posterize', + magnitude_key='bits', + magnitude_range=(0, 4), + prob=0.5), + dict( + type='Solarize', + magnitude_key='thr', + magnitude_range=(0, 256), + prob=0.5), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110), + thr=128, + prob=0.5), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Contrast', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Brightness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Sharpness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5), + dict( + type='Cutout', + magnitude_key='shape', + magnitude_range=(1, 41), + pad_val=0, + prob=0.5), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + 
direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5, + interpolation='bicubic') +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12), + dict( + type='RandomResizedCrop', + size=320, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict(type='Lighting', **img_lighting_cfg), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=False), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=320, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') + +# optimizer +optimizer = dict( + type='SGD', + lr=0.8, + momentum=0.9, + weight_decay=1e-4, + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-6, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=270) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest269_b32x64_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest269_b32x64_imagenet.py new file mode 100644 index 0000000000..7a4db092a4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest269_b32x64_imagenet.py @@ -0,0 +1,181 @@ +_base_ = ['../_base_/models/resnest269.py', '../_base_/default_runtime.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_lighting_cfg = dict( + eigval=[55.4625, 4.7940, 1.1475], + eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + alphastd=0.1, + to_rgb=True) +policies = [ + dict(type='AutoContrast', prob=0.5), + dict(type='Equalize', prob=0.5), + dict(type='Invert', prob=0.5), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 30), + pad_val=0, + prob=0.5, + random_negative_prob=0.5), + dict( + type='Posterize', + magnitude_key='bits', + magnitude_range=(0, 4), + prob=0.5), + dict( + type='Solarize', + magnitude_key='thr', + magnitude_range=(0, 256), + prob=0.5), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110), + 
thr=128, + prob=0.5), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Contrast', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Brightness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Sharpness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5), + dict( + type='Cutout', + magnitude_key='shape', + magnitude_range=(1, 41), + pad_val=0, + prob=0.5), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5, + interpolation='bicubic') +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12), + dict( + type='RandomResizedCrop', + size=416, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict(type='Lighting', **img_lighting_cfg), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=False), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=416, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') + +# optimizer +optimizer = dict( + type='SGD', + lr=0.8, + momentum=0.9, + weight_decay=1e-4, + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-6, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=270) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest50_b64x32_imagenet.py 
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest50_b64x32_imagenet.py new file mode 100644 index 0000000000..812a3bee53 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnest/resnest50_b64x32_imagenet.py @@ -0,0 +1,181 @@ +_base_ = ['../_base_/models/resnest50.py', '../_base_/default_runtime.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_lighting_cfg = dict( + eigval=[55.4625, 4.7940, 1.1475], + eigvec=[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + alphastd=0.1, + to_rgb=True) +policies = [ + dict(type='AutoContrast', prob=0.5), + dict(type='Equalize', prob=0.5), + dict(type='Invert', prob=0.5), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 30), + pad_val=0, + prob=0.5, + random_negative_prob=0.5), + dict( + type='Posterize', + magnitude_key='bits', + magnitude_range=(0, 4), + prob=0.5), + dict( + type='Solarize', + magnitude_key='thr', + magnitude_range=(0, 256), + prob=0.5), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110), + thr=128, + prob=0.5), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Contrast', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Brightness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Sharpness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5), + dict( + type='Cutout', + magnitude_key='shape', + magnitude_range=(1, 41), + pad_val=0, + prob=0.5), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5, + interpolation='bicubic') +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12), + dict( + type='RandomResizedCrop', + size=224, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict(type='Lighting', **img_lighting_cfg), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=False), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + interpolation='bicubic', + backend='pillow'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data 
= dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_prefix='data/imagenet/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') + +# optimizer +optimizer = dict( + type='SGD', + lr=0.8, + momentum=0.9, + weight_decay=1e-4, + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) + +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-6, + warmup_by_epoch=True) +runner = dict(type='EpochBasedRunner', max_epochs=270) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/README.md new file mode 100644 index 0000000000..8e30bcb46b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/README.md @@ -0,0 +1,47 @@ +# Deep Residual Learning for Image Recognition + + +## Introduction + + + +```latex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` + +## Results and models + +## Cifar10 + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| ResNet-18-b16x8 | 11.17 | 0.56 | 94.82 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.log.json) | +| ResNet-34-b16x8 | 21.28 | 1.16 | 95.34 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.log.json) | +| ResNet-50-b16x8 | 23.52 | 1.31 | 95.55 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.log.json) | +| ResNet-101-b16x8 | 42.51 | 2.52 | 95.58 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.log.json) | +| ResNet-152-b16x8 | 58.16 | 3.74 | 95.76 | | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.log.json) | + +## Cifar100 + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| ResNet-50-b16x8 | 23.71 | 1.31 | 79.90 | 95.19 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b16x8_cifar100.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.log.json) | + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| ResNet-18 | 11.69 | 1.82 | 69.90 | 89.43 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.log.json) | +| ResNet-34 | 21.8 | 3.68 | 73.62 | 91.59 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.log.json) | +| ResNet-50 | 25.56 | 4.12 | 76.55 | 93.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.log.json) | +| ResNet-101 | 44.55 | 7.85 | 77.97 | 94.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.log.json) | +| ResNet-152 | 60.19 | 11.58 | 78.48 | 94.13 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.log.json) | +| ResNetV1D-50 | 25.58 | 4.36 | 77.54 | 93.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d50_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.log.json) | +| ResNetV1D-101 | 44.57 | 8.09 | 78.93 | 94.48 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d101_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.log.json) | +| ResNetV1D-152 | 60.21 | 11.82 | 79.41 | 94.70 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d152_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.log.json) | diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/metafile.yml new file mode 100644 index 0000000000..8353014dbd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/metafile.yml @@ -0,0 +1,217 @@ +Collections: + - Name: ResNet + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - ResNet + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html + Title: "Deep Residual Learning for Image Recognition" + README: configs/resnet/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnet.py#L383 + Version: v0.15.0 + - Name: ResNet-CIFAR + Metadata: + Training Data: CIFAR-10 + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x 1080 GPUs + Epochs: 200 + Batch Size: 128 + Architecture: + - ResNet + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html + Title: "Deep Residual Learning for Image Recognition" + README: configs/resnet/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnet_cifar.py#L10 + Version: v0.15.0 + +Models: + - Name: resnet18_b16x8_cifar10 + Metadata: + FLOPs: 560000000 + Parameters: 11170000 + In Collection: ResNet-CIFAR + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 94.82 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth + Config: configs/resnet/resnet18_b16x8_cifar10.py + - Name: resnet34_b16x8_cifar10 + Metadata: + FLOPs: 1160000000 + Parameters: 21280000 + In Collection: ResNet-CIFAR + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth + Config: configs/resnet/resnet34_b16x8_cifar10.py + - Name: resnet50_b16x8_cifar10 + Metadata: + FLOPs: 1310000000 + Parameters: 23520000 + In Collection: ResNet-CIFAR + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.55 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth + Config: configs/resnet/resnet50_b16x8_cifar10.py + - Name: resnet101_b16x8_cifar10 + Metadata: + FLOPs: 2520000000 + Parameters: 42510000 + In Collection: ResNet-CIFAR + Results: + - Dataset: CIFAR-10 
+ Metrics: + Top 1 Accuracy: 95.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth + Config: configs/resnet/resnet101_b16x8_cifar10.py + - Name: resnet152_b16x8_cifar10 + Metadata: + FLOPs: 3740000000 + Parameters: 58160000 + In Collection: ResNet-CIFAR + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.76 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth + Config: configs/resnet/resnet152_b16x8_cifar10.py + - Name: resnet50_b16x8_cifar100 + Metadata: + FLOPs: 1310000000 + Parameters: 23710000 + Training Data: CIFAR-100 + In Collection: ResNet-CIFAR + Results: + - Dataset: CIFAR-100 + Metrics: + Top 1 Accuracy: 79.90 + Top 5 Accuracy: 95.19 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth + Config: configs/resnet/resnet50_b16x8_cifar100.py + - Name: resnet18_b32x8_imagenet + Metadata: + FLOPs: 1820000000 + Parameters: 11690000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.90 + Top 5 Accuracy: 89.43 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth + Config: configs/resnet/resnet18_b32x8_imagenet.py + - Name: resnet34_b32x8_imagenet + Metadata: + FLOPs: 3680000000 + Parameters: 2180000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.62 + Top 5 Accuracy: 91.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth + Config: configs/resnet/resnet34_b32x8_imagenet.py + - Name: resnet50_b32x8_imagenet + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.55 + Top 5 Accuracy: 93.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth + Config: configs/resnet/resnet50_b32x8_imagenet.py + - Name: resnet101_b32x8_imagenet + Metadata: + FLOPs: 7850000000 + Parameters: 44550000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.97 + Top 5 Accuracy: 94.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth + Config: configs/resnet/resnet101_b32x8_imagenet.py + - Name: resnet152_b32x8_imagenet + Metadata: + FLOPs: 11580000000 + Parameters: 60190000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.48 + Top 5 Accuracy: 94.13 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth + Config: configs/resnet/resnet152_b32x8_imagenet.py + - Name: resnetv1d50_b32x8_imagenet + Metadata: + FLOPs: 4360000000 + Parameters: 25580000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.54 + Top 5 Accuracy: 93.57 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth + Config: configs/resnet/resnetv1d50_b32x8_imagenet.py + - Name: resnetv1d101_b32x8_imagenet + Metadata: + FLOPs: 8090000000 + Parameters: 
44570000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.93 + Top 5 Accuracy: 94.48 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth + Config: configs/resnet/resnetv1d101_b32x8_imagenet.py + - Name: resnetv1d152_b32x8_imagenet + Metadata: + FLOPs: 11820000000 + Parameters: 60210000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.41 + Top 5 Accuracy: 94.70 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth + Config: configs/resnet/resnetv1d152_b32x8_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b16x8_cifar10.py new file mode 100644 index 0000000000..166a1740b0 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b16x8_cifar10.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet101_cifar.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b32x8_imagenet.py new file mode 100644 index 0000000000..388d2cd918 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet101_b32x8_imagenet.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet101.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b16x8_cifar10.py new file mode 100644 index 0000000000..3f307b6aa8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b16x8_cifar10.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet152_cifar.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b32x8_imagenet.py new file mode 100644 index 0000000000..cc9dc2cee4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet152_b32x8_imagenet.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet152.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b16x8_cifar10.py new file mode 100644 index 0000000000..c7afa397b7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b16x8_cifar10.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet18_cifar.py', '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', 
'../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b32x8_imagenet.py new file mode 100644 index 0000000000..ac452ff756 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet18_b32x8_imagenet.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet18.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b16x8_cifar10.py new file mode 100644 index 0000000000..7f5cd517d5 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b16x8_cifar10.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet34_cifar.py', '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b32x8_imagenet.py new file mode 100644 index 0000000000..7749261c80 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet34_b32x8_imagenet.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet34.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py new file mode 100644 index 0000000000..8cc79211e9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet21k_bs128.py', + '../_base_/schedules/imagenet_bs1024_coslr.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict(head=dict(num_classes=21843)) + +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=90) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10.py new file mode 100644 index 0000000000..669e5de27e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet50_cifar.py', '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar100.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar100.py new file mode 100644 index 0000000000..39bd90f794 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar100.py @@ -0,0 +1,10 @@ +_base_ = [ + '../_base_/models/resnet50_cifar.py', + '../_base_/datasets/cifar100_bs16.py', + '../_base_/schedules/cifar10_bs128.py', 
'../_base_/default_runtime.py' +] + +model = dict(head=dict(num_classes=100)) + +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005) +lr_config = dict(policy='step', step=[60, 120, 160], gamma=0.2) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10_mixup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10_mixup.py new file mode 100644 index 0000000000..2420ebfeb0 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b16x8_cifar10_mixup.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_cifar_mixup.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_coslr_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_coslr_imagenet.py new file mode 100644 index 0000000000..938a114b79 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_coslr_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_cutmix_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_cutmix_imagenet.py new file mode 100644 index 0000000000..2f8d0ca9f3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_cutmix_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_cutmix.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_imagenet.py new file mode 100644 index 0000000000..c32f333b67 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_imagenet.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py new file mode 100644 index 0000000000..1c1aa5a2c4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_label_smooth.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_mixup_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_mixup_imagenet.py new file mode 100644 index 0000000000..2a153d0e18 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b32x8_mixup_imagenet.py 
@@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_mixup.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py new file mode 100644 index 0000000000..c26245ef53 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_coslr_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs2048_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_imagenet.py new file mode 100644 index 0000000000..34d5288b9d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_imagenet.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs2048.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py new file mode 100644 index 0000000000..23c9defdde --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py @@ -0,0 +1,12 @@ +_base_ = ['./resnet50_b64x32_warmup_imagenet.py'] +model = dict( + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + loss_weight=1.0, + label_smooth_val=0.1, + num_classes=1000), + )) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d101_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d101_b32x8_imagenet.py new file mode 100644 index 0000000000..b16ca863db --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d101_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1d101.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d152_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d152_b32x8_imagenet.py new file mode 100644 index 0000000000..76926ddbb6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d152_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1d152.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d50_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d50_b32x8_imagenet.py new file mode 100644 index 0000000000..208bde470a --- 
/dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnet/resnetv1d50_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1d50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/README.md new file mode 100644 index 0000000000..8a4786aac9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/README.md @@ -0,0 +1,27 @@ +# Aggregated Residual Transformations for Deep Neural Networks + + +## Introduction + + + +```latex +@inproceedings{xie2017aggregated, + title={Aggregated residual transformations for deep neural networks}, + author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1492--1500}, + year={2017} +} +``` + +## Results and models + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| ResNeXt-32x4d-50 | 25.03 | 4.27 | 77.90 | 93.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext50_32x4d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.log.json) | +| ResNeXt-32x4d-101 | 44.18 | 8.03 | 78.61 | 94.17 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101_32x4d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.log.json) | +| ResNeXt-32x8d-101 | 88.79 | 16.5 | 79.27 | 94.58 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101_32x8d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.log.json) | +| ResNeXt-32x4d-152 | 59.95 | 11.8 | 78.88 | 94.33 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext152_32x4d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.log.json) | diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/metafile.yml new file mode 100644 index 0000000000..841bad4ca1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/metafile.yml @@ -0,0 +1,73 @@ +Collections: + - Name: ResNeXt + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 
GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - ResNeXt + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2017/html/Xie_Aggregated_Residual_Transformations_CVPR_2017_paper.html + Title: "Aggregated Residual Transformations for Deep Neural Networks" + README: configs/resnext/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/resnext.py#L90 + Version: v0.15.0 + +Models: + - Name: resnext50_32x4d_b32x8_imagenet + Metadata: + FLOPs: 4270000000 + Parameters: 25030000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.90 + Top 5 Accuracy: 93.66 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth + Config: configs/resnext/resnext50_32x4d_b32x8_imagenet.py + - Name: resnext101_32x4d_b32x8_imagenet + Metadata: + FLOPs: 8030000000 + Parameters: 44180000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.61 + Top 5 Accuracy: 94.17 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth + Config: configs/resnext/resnext101_32x4d_b32x8_imagenet.py + - Name: resnext101_32x8d_b32x8_imagenet + Metadata: + FLOPs: 16500000000 + Parameters: 88790000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.27 + Top 5 Accuracy: 94.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth + Config: configs/resnext/resnext101_32x8d_b32x8_imagenet.py + - Name: resnext152_32x4d_b32x8_imagenet + Metadata: + FLOPs: 11800000000 + Parameters: 59950000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.88 + Top 5 Accuracy: 94.33 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth + Config: configs/resnext/resnext152_32x4d_b32x8_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x4d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000000..970aa60f35 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x4d_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext101_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x8d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x8d_b32x8_imagenet.py new file mode 100644 index 0000000000..315d05fd57 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext101_32x8d_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext101_32x8d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext152_32x4d_b32x8_imagenet.py 
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext152_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000000..9c137313cb --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext152_32x4d_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext152_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext50_32x4d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext50_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000000..bd9c9fcf4e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/resnext/resnext50_32x4d_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext50_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/README.md new file mode 100644 index 0000000000..1241e3fc6e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/README.md @@ -0,0 +1,25 @@ +# Squeeze-and-Excitation Networks + + +## Introduction + + + +```latex +@inproceedings{hu2018squeeze, + title={Squeeze-and-excitation networks}, + author={Hu, Jie and Shen, Li and Sun, Gang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7132--7141}, + year={2018} +} +``` + +## Results and models + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| SE-ResNet-50 | 28.09 | 4.13 | 77.74 | 93.84 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet50_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth) | [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200708-657b3c36.log.json) | +| SE-ResNet-101 | 49.33 | 7.86 | 78.26 | 94.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet101_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth) | [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200708-038a4d04.log.json) | diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/metafile.yml new file mode 100644 index 0000000000..419425dc79 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/metafile.yml @@ -0,0 +1,47 @@ +Collections: + - Name: SEResNet + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 140 + Batch Size: 256 + Architecture: + - ResNet + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper.html + 
Title: "Squeeze-and-Excitation Networks" + README: configs/seresnet/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/seresnet.py#L58 + Version: v0.15.0 + +Models: + - Name: seresnet50_b32x8_imagenet + Metadata: + FLOPs: 4130000000 + Parameters: 28090000 + In Collection: SEResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.74 + Top 5 Accuracy: 93.84 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth + Config: configs/seresnet/seresnet50_b32x8_imagenet.py + - Name: seresnet101_b32x8_imagenet + Metadata: + FLOPs: 7860000000 + Parameters: 49330000 + In Collection: SEResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.26 + Top 5 Accuracy: 94.07 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth + Config: configs/seresnet/seresnet101_b32x8_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet101_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet101_b32x8_imagenet.py new file mode 100644 index 0000000000..8be39e7a32 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet101_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/seresnet101.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet50_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet50_b32x8_imagenet.py new file mode 100644 index 0000000000..19082bd0dd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnet/seresnet50_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/seresnet50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_140e.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/README.md new file mode 100644 index 0000000000..393cc5183d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/README.md @@ -0,0 +1,16 @@ +# Squeeze-and-Excitation Networks + + +## Introduction + + + +```latex +@inproceedings{hu2018squeeze, + title={Squeeze-and-excitation networks}, + author={Hu, Jie and Shen, Li and Sun, Gang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7132--7141}, + year={2018} +} +``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000000..01778305ca --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext101_32x4d_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/seresnext101_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', 
'../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py new file mode 100644 index 0000000000..4d593e45b8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/seresnext/seresnext50_32x4d_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/seresnext50_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/README.md new file mode 100644 index 0000000000..b18934565b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/README.md @@ -0,0 +1,24 @@ +# ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices + + +## Introduction + + + +```latex +@inproceedings{zhang2018shufflenet, + title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, + author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={6848--6856}, + year={2018} +} +``` + +## Results and models + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| ShuffleNetV1 1.0x (group=3) | 1.87 | 0.146 | 68.13 | 87.81 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth) | [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.log.json) | diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/metafile.yml new file mode 100644 index 0000000000..04e7e46484 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: Shufflenet V1 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + - No BN decay + Training Resources: 8x 1080 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shufflenet V1 + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Zhang_ShuffleNet_An_Extremely_CVPR_2018_paper.html + Title: "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" + README: configs/shufflenet_v1/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v1.py#L152 + Version: v0.15.0 + +Models: + - Name: shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet + Metadata: + FLOPs: 146000000 + Parameters: 1870000 + In Collection: Shufflenet V1 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 68.13 + Top 5 Accuracy: 87.81 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth + Config: configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py new file mode 100644 index 0000000000..58e45f1ba4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/shufflenet_v1_1x.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/README.md new file mode 100644 index 0000000000..3502425819 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/README.md @@ -0,0 +1,24 @@ +# Shufflenet v2: Practical guidelines for efficient cnn architecture design + + +## Introduction + + + +```latex +@inproceedings{ma2018shufflenet, + title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, + author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={116--131}, + year={2018} +} +``` + +## Results and models + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| ShuffleNetV2 1.0x | 2.28 | 0.149 | 69.55 | 88.92 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth) | [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200804-8860eec9.log.json) | diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/metafile.yml new file mode 100644 index 0000000000..a1aa95daaa --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: Shufflenet V2 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + - No BN decay + Training Resources: 8x 1080 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shufflenet V2 + Paper: + URL: https://openaccess.thecvf.com/content_ECCV_2018/papers/Ningning_Light-weight_CNN_Architecture_ECCV_2018_paper.pdf + Title: "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + README: configs/shufflenet_v2/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/shufflenet_v2.py#L134 + Version: v0.15.0 + +Models: + - Name: shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet + Metadata: + FLOPs: 149000000 + Parameters: 2280000 + In 
Collection: Shufflenet V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.55 + Top 5 Accuracy: 88.92 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth + Config: configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py new file mode 100644 index 0000000000..a106ab8686 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/shufflenet_v2_1x.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/README.md new file mode 100644 index 0000000000..b1fade80dd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/README.md @@ -0,0 +1,42 @@ +# Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + + +## Introduction + +[ALGORITHM] + +```latex +@article{liu2021Swin, + title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + journal={arXiv preprint arXiv:2103.14030}, + year={2021} +} +``` + +## Pretrained models + +The pre-trained models are converted from the [model zoo of Swin Transformer](https://github.com/microsoft/Swin-Transformer#main-results-on-imagenet-with-pretrained-models).
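+
+Below is a minimal, illustrative sketch of loading one of these converted checkpoints for single-image inference. It assumes the `mmcls` package shipped with this directory is importable and that its `init_model`/`inference_model` helpers are available; the config and checkpoint paths are the Swin-T entries from the tables that follow, and the image path is a placeholder to replace with a local file.
+
+```python
+# Minimal inference sketch (assumption: mmcls is importable from this tree).
+from mmcls.apis import inference_model, init_model
+
+config = 'configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py'
+checkpoint = ('https://download.openmmlab.com/mmclassification/v0/swin-transformer/'
+              'convert/swin_tiny_patch4_window7_224-160bb0a5.pth')
+
+# Build the classifier from the config and load the converted weights.
+model = init_model(config, checkpoint, device='cpu')
+
+# Classify a single image (replace with any local image path).
+result = inference_model(model, 'path/to/your_image.jpg')
+print(result['pred_class'], result['pred_score'])
+```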
+ +### ImageNet 1k + +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +|:---------:|:------------:|:-----------:|:---------:|:---------:|:---------:|:---------:|:--------:| +| Swin-T | ImageNet-1k | 224x224 | 28.29 | 4.36 | 81.18 | 95.52 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_tiny_patch4_window7_224-160bb0a5.pth)| +| Swin-S | ImageNet-1k | 224x224 | 49.61 | 8.52 | 83.21 | 96.25 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth)| +| Swin-B | ImageNet-1k | 224x224 | 87.77 | 15.14 | 83.42 | 96.44 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth)| +| Swin-B | ImageNet-1k | 384x384 | 87.90 | 44.49 | 84.49 | 96.95 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth)| +| Swin-B | ImageNet-22k | 224x224 | 87.77 | 15.14 | 85.16 | 97.50 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth)| +| Swin-B | ImageNet-22k | 384x384 | 87.90 | 44.49 | 86.44 | 98.05 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth)| +| Swin-L | ImageNet-22k | 224x224 | 196.53 | 34.04 | 86.24 | 97.88 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth)| +| Swin-L | ImageNet-22k | 384x384 | 196.74 | 100.04 | 87.25 | 98.25 | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth)| + + +## Results and models + +### ImageNet +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------:|:------------:|:-----------:|:---------:|:---------:|:---------:|:---------:|:----------:|:--------:| +| Swin-T | ImageNet-1k | 224x224 | 28.29 | 4.36 | 81.18 | 95.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925.log.json)| +| Swin-S | ImageNet-1k | 224x224 | 49.61 | 8.52 | 83.02 | 96.29 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219.log.json)| +| Swin-B | ImageNet-1k | 224x224 | 87.77 | 15.14 | 83.36 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742.log.json)| diff --git 
a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/metafile.yml new file mode 100644 index 0000000000..46ea185da2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/metafile.yml @@ -0,0 +1,188 @@ +Collections: + - Name: Swin-Transformer + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 16x V100 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shift Window Multihead Self Attention + Paper: + URL: https://arxiv.org/pdf/2103.14030.pdf + Title: "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows" + README: configs/swin_transformer/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/swin_transformer.py#L176 + Version: v0.15.0 + +Models: + - Name: swin-tiny_64xb16_in1k + Metadata: + FLOPs: 4360000000 + Parameters: 28290000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.18 + Top 5 Accuracy: 95.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth + Config: configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py + - Name: swin-small_64xb16_in1k + Metadata: + FLOPs: 8520000000 + Parameters: 49610000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.02 + Top 5 Accuracy: 96.29 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth + Config: configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py + - Name: swin-base_64xb16_in1k + Metadata: + FLOPs: 15140000000 + Parameters: 87770000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.36 + Top 5 Accuracy: 96.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth + Config: configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py + - Name: swin-tiny_3rdparty_in1k + Metadata: + FLOPs: 4360000000 + Parameters: 28290000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.18 + Top 5 Accuracy: 95.52 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_tiny_patch4_window7_224-160bb0a5.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py + - Name: swin-small_3rdparty_in1k + Metadata: + FLOPs: 8520000000 + Parameters: 49610000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.21 + Top 5 Accuracy: 96.25 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth + Converted From: + Weights: 
https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py + - Name: swin-base_3rdparty_in1k + Metadata: + FLOPs: 15140000000 + Parameters: 87770000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.42 + Top 5 Accuracy: 96.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py + - Name: swin-base_3rdparty_in1k-384 + Metadata: + FLOPs: 44490000000 + Parameters: 87900000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.49 + Top 5 Accuracy: 96.95 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin_base_384_evalonly_imagenet.py + - Name: swin-base_in21k-pre-3rdparty_in1k + Metadata: + FLOPs: 15140000000 + Parameters: 87770000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.16 + Top 5 Accuracy: 97.50 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py + - Name: swin-base_in21k-pre-3rdparty_in1k-384 + Metadata: + FLOPs: 44490000000 + Parameters: 87900000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.44 + Top 5 Accuracy: 98.05 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin_base_384_evalonly_imagenet.py + - Name: swin-large_in21k-pre-3rdparty_in1k + Metadata: + FLOPs: 34040000000 + Parameters: 196530000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.24 + Top 5 Accuracy: 97.88 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin_large_224_evalonly_imagenet.py + - Name: swin-large_in21k-pre-3rdparty_in1k-384 + Metadata: + FLOPs: 100040000000 + Parameters: 196740000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.25 + Top 5 Accuracy: 98.25 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin_large_384_evalonly_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py new file mode 100644 index 0000000000..2a4548af0b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer/base_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_384_evalonly_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_384_evalonly_imagenet.py new file mode 100644 index 0000000000..711a0d6d21 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_base_384_evalonly_imagenet.py @@ -0,0 +1,7 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer/base_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_224_evalonly_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_224_evalonly_imagenet.py new file mode 100644 index 0000000000..4e875c59f3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_224_evalonly_imagenet.py @@ -0,0 +1,7 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer/large_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_384_evalonly_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_384_evalonly_imagenet.py new file mode 100644 index 0000000000..a7f0ad2762 --- 
/dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_large_384_evalonly_imagenet.py @@ -0,0 +1,7 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer/large_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py new file mode 100644 index 0000000000..aa1fa21b05 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer/small_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py new file mode 100644 index 0000000000..e1ed022a1b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer/tiny_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/README.md new file mode 100644 index 0000000000..64768463a4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/README.md @@ -0,0 +1,33 @@ +# Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet + + +## Introduction + + + +```latex +@article{yuan2021tokens, + title={Tokens-to-token vit: Training vision transformers from scratch on imagenet}, + author={Yuan, Li and Chen, Yunpeng and Wang, Tao and Yu, Weihao and Shi, Yujun and Tay, Francis EH and Feng, Jiashi and Yan, Shuicheng}, + journal={arXiv preprint arXiv:2101.11986}, + year={2021} +} +``` + +## Pretrain model + +The pre-trained models are converted from [official repo](https://github.com/yitu-opensource/T2T-ViT/tree/main#2-t2t-vit-models).
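+
+Note that the T2T-ViT configs referenced below, like the other configs in this repository, keep only `_base_` fragments and a few overrides in the top-level file; mmcv merges everything when the config is loaded. A small sketch for inspecting the merged result, assuming mmcv is installed and the code is run from the directory that contains `configs/`:
+
+```python
+from mmcv import Config
+
+# Config.fromfile() recursively merges every file listed in _base_.
+cfg = Config.fromfile('configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py')
+
+print(cfg.model.backbone)     # model definition pulled in from ../_base_/models/t2t-vit-t-14.py
+print(cfg.optimizer)          # AdamW settings declared in the top-level config
+print(cfg.runner.max_epochs)  # 310: 300 cosine-annealing epochs plus 10 epochs at min_lr
+```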
+ +### ImageNet-1k + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:--------------:|:---------:|:--------:|:---------:|:---------:|:------:|:--------:| +| T2T-ViT_t-14\* | 21.47 | 4.34 | 81.69 | 95.85 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_3rdparty_8xb64_in1k_20210928-b7c09b62.pth) | [log]()| +| T2T-ViT_t-19\* | 39.08 | 7.80 | 82.43 | 96.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_3rdparty_8xb64_in1k_20210928-7f1478d5.pth) | [log]()| +| T2T-ViT_t-24\* | 64.00 | 12.69 | 82.55 | 96.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_3rdparty_8xb64_in1k_20210928-fe95a61b.pth) | [log]()| + +*Models with \* are converted from other repos.* + +## Results and models + +Results and models will be added soon. diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/metafile.yml new file mode 100644 index 0000000000..0abcfe0617 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/metafile.yml @@ -0,0 +1,64 @@ +Collections: + - Name: Tokens-to-Token ViT + Metadata: + Training Data: ImageNet-1k + Architecture: + - Layer Normalization + - Scaled Dot-Product Attention + - Attention Dropout + - Dropout + - Tokens to Token + Paper: + URL: https://arxiv.org/abs/2101.11986 + Title: "Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet" + README: configs/t2t_vit/README.md + +Models: + - Name: t2t-vit-t-14_3rdparty_8xb64_in1k + Metadata: + FLOPs: 4340000000 + Parameters: 21470000 + In Collection: Tokens-to-Token ViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.69 + Top 5 Accuracy: 95.85 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_3rdparty_8xb64_in1k_20210928-b7c09b62.pth + Converted From: + Weights: https://github.com/yitu-opensource/T2T-ViT/releases/download/main/81.7_T2T_ViTt_14.pth.tar + Code: https://github.com/yitu-opensource/T2T-ViT/blob/main/models/t2t_vit.py#L243 + Config: configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py + - Name: t2t-vit-t-19_3rdparty_8xb64_in1k + Metadata: + FLOPs: 7800000000 + Parameters: 39080000 + In Collection: Tokens-to-Token ViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.43 + Top 5 Accuracy: 96.08 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_3rdparty_8xb64_in1k_20210928-7f1478d5.pth + Converted From: + Weights: https://github.com/yitu-opensource/T2T-ViT/releases/download/main/82.4_T2T_ViTt_19.pth.tar + Code: https://github.com/yitu-opensource/T2T-ViT/blob/main/models/t2t_vit.py#L254 + Config: configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py + - Name: t2t-vit-t-24_3rdparty_8xb64_in1k + Metadata: + FLOPs: 12690000000 + Parameters: 64000000 + In Collection: Tokens-to-Token ViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.55 + Top 5 Accuracy: 96.06 + Task: Image Classification + Weights:
https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_3rdparty_8xb64_in1k_20210928-fe95a61b.pth + Converted From: + Weights: https://github.com/yitu-opensource/T2T-ViT/releases/download/main/82.6_T2T_ViTt_24.pth.tar + Code: https://github.com/yitu-opensource/T2T-ViT/blob/main/models/t2t_vit.py#L265 + Config: configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py new file mode 100644 index 0000000000..126d564ed2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/t2t-vit-t-14.py', + '../_base_/datasets/imagenet_bs64_t2t_224.py', + '../_base_/default_runtime.py', +] + +# optimizer +paramwise_cfg = dict( + bias_decay_mult=0.0, + custom_keys={'.backbone.cls_token': dict(decay_mult=0.0)}, +) +optimizer = dict( + type='AdamW', + lr=5e-4, + weight_decay=0.05, + paramwise_cfg=paramwise_cfg, +) +optimizer_config = dict(grad_clip=None) + +# learning policy +# FIXME: lr in the first 300 epochs conforms to the CosineAnnealing and +# the lr in the last 10 epoch equals to min_lr +lr_config = dict( + policy='CosineAnnealing', + min_lr=1e-5, + by_epoch=True, + warmup_by_epoch=True, + warmup='linear', + warmup_iters=10, + warmup_ratio=1e-6) +runner = dict(type='EpochBasedRunner', max_epochs=310) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py new file mode 100644 index 0000000000..afd05a76a4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/t2t-vit-t-19.py', + '../_base_/datasets/imagenet_bs64_t2t_224.py', + '../_base_/default_runtime.py', +] + +# optimizer +paramwise_cfg = dict( + bias_decay_mult=0.0, + custom_keys={'.backbone.cls_token': dict(decay_mult=0.0)}, +) +optimizer = dict( + type='AdamW', + lr=5e-4, + weight_decay=0.065, + paramwise_cfg=paramwise_cfg, +) +optimizer_config = dict(grad_clip=None) + +# learning policy +# FIXME: lr in the first 300 epochs conforms to the CosineAnnealing and +# the lr in the last 10 epoch equals to min_lr +lr_config = dict( + policy='CosineAnnealing', + min_lr=1e-5, + by_epoch=True, + warmup_by_epoch=True, + warmup='linear', + warmup_iters=10, + warmup_ratio=1e-6) +runner = dict(type='EpochBasedRunner', max_epochs=310) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py new file mode 100644 index 0000000000..9f856f3e59 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/t2t-vit-t-24.py', + '../_base_/datasets/imagenet_bs64_t2t_224.py', + '../_base_/default_runtime.py', +] + +# optimizer +paramwise_cfg = dict( + bias_decay_mult=0.0, + custom_keys={'.backbone.cls_token': dict(decay_mult=0.0)}, +) +optimizer = dict( + type='AdamW', + lr=5e-4, + weight_decay=0.065, + paramwise_cfg=paramwise_cfg, +) +optimizer_config = dict(grad_clip=None) + +# learning policy +# FIXME: lr in the first 
300 epochs conforms to the CosineAnnealing and +# the lr in the last 10 epoch equals to min_lr +lr_config = dict( + policy='CosineAnnealing', + min_lr=1e-5, + by_epoch=True, + warmup_by_epoch=True, + warmup='linear', + warmup_iters=10, + warmup_ratio=1e-6) +runner = dict(type='EpochBasedRunner', max_epochs=310) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/README.md new file mode 100644 index 0000000000..5e4bd38c94 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/README.md @@ -0,0 +1,32 @@ +# Transformer in Transformer + +## Introduction + + + +```latex +@misc{han2021transformer, + title={Transformer in Transformer}, + author={Kai Han and An Xiao and Enhua Wu and Jianyuan Guo and Chunjing Xu and Yunhe Wang}, + year={2021}, + eprint={2103.00112}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +## Pretrain model + +The pre-trained models are converted from [timm](https://github.com/rwightman/pytorch-image-models/). + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:--------:| +| Transformer in Transformer small\* | 23.76 | 3.36 | 81.52 | 95.73 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth) | [log]()| + +*Models with \* are converted from other repos.* + +## Results and models + +Results and models will be added soon. diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/metafile.yml new file mode 100644 index 0000000000..ff8558b3c6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: Transformer in Transformer + Metadata: + Training Data: ImageNet-1k + Paper: + URL: https://arxiv.org/abs/2103.00112 + Title: "Transformer in Transformer" + README: configs/tnt/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/tnt.py#L203 + Version: v0.15.0 + +Models: + - Name: tnt-small-p16_3rdparty_in1k + Metadata: + FLOPs: 3360000000 + Parameters: 23760000 + In Collection: Transformer in Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.52 + Top 5 Accuracy: 95.73 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth + Config: configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py + Converted From: + Weights: https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar + Code: https://github.com/contrastive/pytorch-image-models/blob/809271b0f3e5d9be4e11c0c5cec1dbba8b5e2c60/timm/models/tnt.py#L144 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py new file mode 100644 index 0000000000..e09820bf5d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py @@ -0,0 +1,39 @@ +# accuracy_top-1 : 81.52 accuracy_top-5 :
95.73 +_base_ = [ + '../_base_/models/tnt_s_patch16_224.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/default_runtime.py' +] + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(248, -1), + interpolation='bicubic', + backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +dataset_type = 'ImageNet' +data = dict( + samples_per_gpu=32, workers_per_gpu=4, test=dict(pipeline=test_pipeline)) + +# optimizer +optimizer = dict(type='AdamW', lr=1e-3, weight_decay=0.05) +optimizer_config = dict(grad_clip=None) + +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup_by_epoch=True, + warmup='linear', + warmup_iters=5, + warmup_ratio=1e-3) +runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/README.md new file mode 100644 index 0000000000..a1aca53dfc --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/README.md @@ -0,0 +1,31 @@ +# Very Deep Convolutional Networks for Large-Scale Image Recognition + + +## Introduction + + + +```latex +@article{simonyan2014very, + title={Very deep convolutional networks for large-scale image recognition}, + author={Simonyan, Karen and Zisserman, Andrew}, + journal={arXiv preprint arXiv:1409.1556}, + year={2014} +} + +``` + +## Results and models + +### ImageNet + +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| VGG-11 | 132.86 | 7.63 | 68.75 | 88.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.log.json) | +| VGG-13 | 133.05 | 11.34 | 70.02 | 89.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.log.json) | +| VGG-16 | 138.36 | 15.5 | 71.62 | 90.49 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.log.json) | +| VGG-19 | 143.67 | 19.67 | 72.41 | 90.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.log.json)| +| VGG-11-BN | 132.87 | 7.64 | 70.67 | 90.16 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11bn_b32x8_imagenet.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.log.json) | +| VGG-13-BN | 133.05 | 11.36 | 72.12 | 90.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13bn_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.log.json) | +| VGG-16-BN | 138.37 | 15.53 | 73.74 | 91.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16bn_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.log.json) | +| VGG-19-BN | 143.68 | 19.7 | 74.68 | 92.27 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19bn_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.log.json)| diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/metafile.yml new file mode 100644 index 0000000000..0c94481200 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/metafile.yml @@ -0,0 +1,125 @@ +Collections: + - Name: VGG + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x Xp GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - VGG + Paper: + URL: https://arxiv.org/abs/1409.1556 + Title: "Very Deep Convolutional Networks for Large-Scale Image Recognition" + README: configs/vgg/README.md + Code: + URL: https://github.com/open-mmlab/mmclassification/blob/v0.15.0/mmcls/models/backbones/vgg.py#L39 + Version: v0.15.0 + +Models: + - Name: vgg11_b32x8_imagenet + Metadata: + FLOPs: 7630000000 + Parameters: 132860000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 68.75 + Top 5 Accuracy: 88.87 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth + Config: configs/vgg/vgg11_b32x8_imagenet.py + - Name: vgg13_b32x8_imagenet + Metadata: + FLOPs: 11340000000 + Parameters: 133050000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 70.02 + Top 5 Accuracy: 89.46 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth + Config: configs/vgg/vgg13_b32x8_imagenet.py + - Name: vgg16_b32x8_imagenet + Metadata: + FLOPs: 15500000000 + Parameters: 138360000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.62 + Top 5 Accuracy: 90.49 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth + Config: configs/vgg/vgg16_b32x8_imagenet.py + - Name: vgg19_b32x8_imagenet + Metadata: + FLOPs: 19670000000 + Parameters: 143670000 + In Collection: VGG + Results: + -
Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 72.41 + Top 5 Accuracy: 90.8 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth + Config: configs/vgg/vgg19_b32x8_imagenet.py + - Name: vgg11bn_b32x8_imagenet + Metadata: + FLOPs: 7640000000 + Parameters: 132870000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 70.67 + Top 5 Accuracy: 90.16 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth + Config: configs/vgg/vgg11bn_b32x8_imagenet.py + - Name: vgg13bn_b32x8_imagenet + Metadata: + FLOPs: 11360000000 + Parameters: 133050000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 72.12 + Top 5 Accuracy: 90.66 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth + Config: configs/vgg/vgg13bn_b32x8_imagenet.py + - Name: vgg16bn_b32x8_imagenet + Metadata: + FLOPs: 15530000000 + Parameters: 138370000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.74 + Top 5 Accuracy: 91.66 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth + Config: configs/vgg/vgg16bn_b32x8_imagenet.py + - Name: vgg19bn_b32x8_imagenet + Metadata: + FLOPs: 19700000000 + Parameters: 143680000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.68 + Top 5 Accuracy: 92.27 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth + Config: configs/vgg/vgg19bn_b32x8_imagenet.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11_b32x8_imagenet.py new file mode 100644 index 0000000000..c5742bcb98 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11_b32x8_imagenet.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/vgg11.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] +optimizer = dict(lr=0.01) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11bn_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11bn_b32x8_imagenet.py new file mode 100644 index 0000000000..4ead074bfb --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg11bn_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/vgg11bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13_b32x8_imagenet.py new file mode 100644 index 0000000000..50d26f3d2b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg13.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] 
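+# Like the other plain (no-BN) VGG configs in this folder, this config lowers the
+# learning rate of the shared imagenet_bs256 schedule below, while the *bn variants
+# keep the default; presumably the higher base lr is only stable with batch normalization.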
+optimizer = dict(lr=0.01) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13bn_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13bn_b32x8_imagenet.py new file mode 100644 index 0000000000..8d22a81729 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg13bn_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/vgg13bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b16x8_voc.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b16x8_voc.py new file mode 100644 index 0000000000..d096959f29 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b16x8_voc.py @@ -0,0 +1,25 @@ +_base_ = ['../_base_/datasets/voc_bs16.py', '../_base_/default_runtime.py'] + +# use different head for multilabel task +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=16, num_classes=20), + neck=None, + head=dict( + type='MultiLabelClsHead', + loss=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))) + +# load model pretrained on imagenet +load_from = 'https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth' # noqa + +# optimizer +optimizer = dict( + type='SGD', + lr=0.001, + momentum=0.9, + weight_decay=0, + paramwise_cfg=dict(custom_keys={'.backbone.classifier': dict(lr_mult=10)})) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=20, gamma=0.1) +runner = dict(type='EpochBasedRunner', max_epochs=40) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b32x8_imagenet.py new file mode 100644 index 0000000000..55cd9fc4ab --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg16.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] +optimizer = dict(lr=0.01) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16bn_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16bn_b32x8_imagenet.py new file mode 100644 index 0000000000..60674c7144 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg16bn_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/vgg16bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19_b32x8_imagenet.py new file mode 100644 index 0000000000..6b033c90b6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19_b32x8_imagenet.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg19.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] +optimizer = dict(lr=0.01) diff --git 
a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19bn_b32x8_imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19bn_b32x8_imagenet.py new file mode 100644 index 0000000000..18a1897f65 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vgg/vgg19bn_b32x8_imagenet.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/vgg19bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/README.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/README.md new file mode 100644 index 0000000000..c78d00d206 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/README.md @@ -0,0 +1,51 @@ +# An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale + + +## Introduction + +[ALGORITHM] + +```latex +@inproceedings{ + dosovitskiy2021an, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, + booktitle={International Conference on Learning Representations}, + year={2021}, + url={https://openreview.net/forum?id=YicbFdNTTy} +} +``` + +The training of Vision Transformers is divided into two steps. The first +step is to pre-train the model on a large dataset, such as ImageNet-21k, to obtain the +pre-trained model. The second step is to fine-tune the pre-trained model on the target +dataset, such as ImageNet-1k, to obtain the fine-tuned model. Here, we provide both the +pre-trained models and the fine-tuned models. + +## Pretrain model + +The pre-trained models are converted from [model zoo of Google Research](https://github.com/google-research/vision_transformer#available-vit-models). + +### ImageNet 21k + +| Model | Params(M) | Flops(G) | Download | +|:----------:|:---------:|:---------:|:--------:| +| ViT-B16\* | 86.86 | 33.03 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth)| +| ViT-B32\* | 88.30 | 8.56 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p32_3rdparty_pt-64xb64_in1k-224_20210928-eee25dd4.pth)| +| ViT-L16\* | 304.72 | 116.68 | [model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-large-p16_3rdparty_pt-64xb64_in1k-224_20210928-0001f9a1.pth)| + +*Models with \* are converted from other repos.* + + +## Finetune model + +The fine-tuned models are converted from [model zoo of Google Research](https://github.com/google-research/vision_transformer#available-vit-models).
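+
+The same two-step recipe can be reproduced with these configs by taking one of them as `_base_` and pointing `load_from` at a pre-trained checkpoint, the way other configs in this repository initialize from ImageNet weights. The sketch below is only illustrative: the config file name is hypothetical, the checkpoint URL is copied from the ImageNet-21k table above, and the shortened schedule is a placeholder rather than a setting used for the released models.
+
+```python
+# Hypothetical fine-tuning config, e.g. configs/vision_transformer/vit-base-p16_my-finetune.py
+_base_ = ['./vit-base-p16_pt-64xb64_in1k-224.py']
+
+# Initialize from the converted pre-trained weights before fine-tuning.
+load_from = 'https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth'  # noqa
+
+# Fine-tuning usually needs far fewer epochs than training from scratch (placeholder value).
+runner = dict(type='EpochBasedRunner', max_epochs=30)
+```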
+ +### ImageNet 1k +| Model | Pretrain | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:----------:|:------------:|:-----------:|:---------:|:---------:|:---------:|:---------:|:----------:|:--------:| +| ViT-B16\* | ImageNet-21k | 384x384 | 86.86 | 33.03 | 85.43 | 97.77 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth)| +| ViT-B32\* | ImageNet-21k | 384x384 | 88.30 | 8.56 | 84.01 | 97.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth)| +| ViT-L16\* | ImageNet-21k | 384x384 | 304.72 | 116.68 | 85.63 | 97.63 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth)| + +*Models with \* are converted from other repos.* diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/metafile.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/metafile.yml new file mode 100644 index 0000000000..a497b17e4f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/metafile.yml @@ -0,0 +1,76 @@ +Collections: + - Name: Vision Transformer + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + - Tanh Activation + Paper: + URL: https://arxiv.org/pdf/2010.11929.pdf + Title: 'An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale' + README: configs/vision_transformer/README.md + +Models: + - Name: vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384 + In Collection: Vision Transformer + Metadata: + FLOPs: 33030000000 + Parameters: 86860000 + Training Data: + - ImageNet-21k + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.43 + Top 5 Accuracy: 97.77 + Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth + Converted From: + Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz + Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208 + Config: configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py + - Name: vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384 + In Collection: Vision Transformer + Metadata: + FLOPs: 8560000000 + Parameters: 88300000 + Training Data: + - ImageNet-21k + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.01 + Top 5 Accuracy: 97.08 + Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth + 
Converted From: + Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz + Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208 + Config: configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py + - Name: vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384 + In Collection: Vision Transformer + Metadata: + FLOPs: 116680000000 + Parameters: 304720000 + Training Data: + - ImageNet-21k + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.63 + Top 5 Accuracy: 97.63 + Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth + Converted From: + Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_strong1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz + Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208 + Config: configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py new file mode 100644 index 0000000000..cb42d0d813 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(img_size=384)) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(384, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py new file mode 100644 index 0000000000..79c323b1ef --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + 
head=dict(hidden_dim=3072), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py new file mode 100644 index 0000000000..0386fef1fd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_ft-64xb64_in1k-384.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/vit-base-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(img_size=384)) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(384, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py new file mode 100644 index 0000000000..a477e2119e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-base-p32_pt-64xb64_in1k-224.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/vit-base-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py new file mode 100644 index 0000000000..5be99188bf --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_ft-64xb64_in1k-384.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/vit-large-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(img_size=384)) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', 
keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(384, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py new file mode 100644 index 0000000000..5cf7a7d30c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p16_pt-64xb64_in1k-224.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/vit-large-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py new file mode 100644 index 0000000000..60506b0241 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_ft-64xb64_in1k-384.py @@ -0,0 +1,37 @@ +# Refer to pytorch-image-models +_base_ = [ + '../_base_/models/vit-large-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(img_size=384)) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(384, -1), backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py new file mode 100644 index 0000000000..773ade874a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/configs/vision_transformer/vit-large-p32_pt-64xb64_in1k-224.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/vit-large-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', 
+ '../_base_/default_runtime.py' +] + +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict( + augments=dict(type='BatchMixup', alpha=0.2, num_classes=1000, + prob=1.))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/demo/demo.JPEG b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/demo/demo.JPEG new file mode 100644 index 0000000000000000000000000000000000000000..fd3a93f59385d6ff632483646e6caee300b56d09 GIT binary patch literal 109527 zcmb4qcT`hP@Na-XDAJ2TLQMb#M0$tNoAf3qNC`y|=?Fw1G!Zc*gx;h`2SF)H69okl zKtY-)MbHF85u{2FzkGl1ocI2IbMMJHIXgRd@7>v%ozKjke>-0SaF`pL83Sl&XaFA6 z3vj*xfY{s#3J(gq6BI0>C@Tk0(>JpO0s;R^=W_sq|9Jl&(bCe<{zpJMI$An0#AB=A$;b;0(NDlB+Lbr=Z87n|yv7Yew=X9C1dAKmD=AM< z#R$*<{`VpO_bGt?!~m$TO>I=6j^{rD(9_b>0jZ;*HUc>5M7iXFdi304ww^o+`XLFp zat3HKuU+5P__m_gPrmb6zy&HNEhil(01h}Kmbg| zz;u;=zxLh@`FAp04$Yv%Fyaa7WA)&laF3U58J`CiEkm}qQ$*0;alZ;% zShDdaa2OumzA+>Hy>2q%a#=Zz`=~~wyUq7{F3asqT_GZVbq9Idi-Uf=@WMqz{6{Ss zGUafUvp$$%%?EKq^c4-Ap!|?PdXeH1l&y)7oIpCgu$Ro;sos?bOlCnp#Kd^V?720C zXV98z?j~X2?Piysgn6yG+!SAI%Ls*A|64uE7v>H0WRe=|*&R!uzN+Bk%=f{?;v=ows-XoTa9UYYOVoePNlbP z_pFur_Sta}`MF7GxpeR&4^BaYI(3mKDE4SSCR+DaTBxv{hk0c*vjG1`)zuZqhcZGn zxwrCDe?ZotN^t_Ebamo!GfyM`l?(W}kL+~V!#zDAGPmz<+r;OOWrTa-bJe~gA6{=7 zIwa(HL<--*UpE|}*n54t*ytQnIjMe)5z zg9kq9Tj?Oq!=ALo^FaWQ-eNaOu!{UnOMlAWHe1DDv18Wm78ZAvKa_b2n3IA-Nc)$4 z(`@8K-smj}6(wLw-vkbqvFRO>n>>vzgx@Bbddl$PZsGcb*ewaim(ZsB3LMb*hO8`% zRyc1x(-Yp&fv`+?khXNdD2_v1+yyK#!Q$6s>syhs=i5)q#94otl(0J;&HJU80O(Me zNZxP{I(;t)RW!@;sx(%by`=_9INAQv`Y`#5Sbx%l7T~%pbq!5EmoMy`P;?FO@p3NJ z@@r~#RBPlfYv3ckzNljy8Zu$ESXW>DB{W|odQfq!_cWq+Q_#_Jt7v#fk4|gpXuaUi z6y2^U=esdG6>oJB@BJ^Fp$aXueJpYa;=y{^k^~)r$e9O{OkWfX-=w|s%g6IfzY@bE zRNQ);>VQnt9FDb6%sNJ$9j-p*oM%Nxat|r3kz`DXyGJWALprpU`Y&LEP22U53-mi- zrP_+_wM#C!jGlXP6#^z&TWlynb=Au^HY6oP-07mUmQ>`B@uZ%Ste5pZHIGy}MdTq% z;Rd)mj)=}Y`8x3Z&Up?R{J+%>=5%EjXzDw%=}ba*Fr7BwXSk=0*;N1623`BjOX9Kf zS3J)Qii0(rb{vH36@E$Y>(6?2ieCos(?Q}s)^}feooZQx>v?)1;a6HSu7%AxLwyN5 zwC29)v^=OR*mp2;^^t68iYL_t09 zSnoODy`ZfPcSnulm2$E?dQA}M@Q9P~>7E%pua^NyUG84j!&xclJ#t;>#LW+s5s z!=u&jMy9X#B#iq~X2)xxLwMnK)AqHy1K9aIskv|Tc6#vkQw<-~+ahm~eMN3{uta#<*J1VD0r0jla zq1RtXwfSh$-b;qJUsuoOR+-gxWdDHP>R2y#tkc-M15;4b)rr~Oo|MX(a^&%k=`JAH z6zM`vaVO!Q%J@~3D^$M^FLA}k#FSK9|9hqg$@#6I#r;vWqk=~>#UzJFiumTNg+ODf zB_7G>4@ohJi8Nf<`gjuYi6+@w?5_DEb50XhsaI1?Z&QEimYZoUvFU_O1~V&3N&EO} zCQAc*Wlc#Xa6w7I`FkRPzCL!<0iAt=prO@24put96Sg8%nD3LYTl)&rYaz5vCa?m} z4i1zgdHaisrB&hMaJ$O*_gPrF1_fLz!?7ZR(f7A>9a}U<_%XZRk^=i8d>$rybeA*G zIiymxF$eyYcm$$*eu1QKs_XcGxJ^l_jD)S2r@Z^UvM%ziUUyIng9=1vyFS zYX0dU$w6VwNfBY8-|gFH!;(r_ChLUnGzE1wRPfW*=MqjS8oyt8(jFJ)>jrqgW6;G5 z!u>MLFHY}Zv_GTm=PZ!l6#B)oGq z2e2j(6lU;I*uygepaYkz9Ev!4#Hgvre-wpti2D(>i??!hH2y7>ob+H>s$;h}i%2(h*>wW(cjkv4J;($n9j;18@|O%)@ajTi=%Eer$9^HxpJ6t`)=9f3 z5z4oUjZ6#WoO2fwW8yTdG&`2loeR}w9jCbB;P0&(?Lt*pFC|+Jg!Br9O>>&guU4>* zzHE;(NuoS68)6Xkx@-x7iRa2CA~Ojs#8Y@*e@E}XW!K6Sv&b1-@|410oi{Tl+P6KO&}dPDmS zKEH&?N!6*!xfh1AbgqqL8H`dN3>mle3l0H=xd2Vm(WVRvw1=zCMai<8K91T(vk8Op z!%GQAYlK8aP6uDgQ*~HsPD3 zd(j26F;d8s-I?dk;THWIrFPmyq3W<;V+FvfL&&9UCqc_k_uk?%U8RBo;zFZw>vN`8AX5BNRR#l$|w*kDkFHq?Z_Z}n?R ze^~$1%y^Ye(eIA&^d@6gL7iMPof_T3zAC#zW2JL~e)fmhUEuiqL)AANnedL))aDQt zflkPbzmsF8*{>9UDg$myaW@lLK;Q}Z(0h^=7>NqJJN~%)<(=v8sf^yQ(gfguEb_l~ zS!*~W{FcGa?G;A$b3p9^Ze9Gr<^s#~(9eJ?L%Z=NB{}b)N#n)AaK}pqKiuki_};Zw z(ZiZqF4H_Y2Q;CU$KBza%Aqe4j?-`4oP6fcSRb0Av(Q4zt^KG>NKh$Gpq-7&21|0zzsA*r!lDA=^UmZ1leIwmDZEmd2= zJ`V4B_lMmZFxI5KCDhT1+;QbOK+JOOZ}2AV6(JMX!-Rzo^q1?3nRtqQtkeQgfAQv) zgZ9QGyWXa!@08CBGt}|A3F@hmw!U>ne_urWnhLzz?d`NLoiiUY%BBZ~Ysj`kFaYeHT(I2FFHy6-6p}FB 
z+aWmE1OBF9gV~P&}h3RfVMF^HnF%Ca#+pVbT)q5wF+ zCw4r6)Zd7jQtxvzT(52BwGG)Mp*U#K<6Wy7s*u1C$`1tPfd#Rgj~zppKel#7JDaU_ zv0j^fwvSa-Ad45HC@}y|0UqEF&IUaA2R%14Gkl{W)U_QllvU>R?MAUl5L&?psQl+{ zGs1v#{f2%z%;a`NzBVVg4b4Gp*tHg)BcuAt&bGny#ts7lIXFL_GvlZ;pHjmLgK{aP znY8%oikEc7VIs_9lr*fl;fNrj{5C%asJfqWxS3DwF>A{;C?dGj?9Zges|@U`Bgr}( z0Cu4LIRlP7ZAKMt+X^$Y1N^IbPy+~-t@r7kbX+%1b|0O*XmBe z1Q@?ktu2}`*Jsmp^Mpuh!lWynH|}*j;O7}Q{GO)3gLfzit?Cz9<%Vlk?^0MRD`GV= zGoqFN6g-@^SM?8R+mGL;#z8jFH{wliQN3>HG-R<=VTXK$pfVpBMM8v}A9hde;NbMR zZo~saacxSMHH3S8&#CG2Jb3kBuPPWPat7F@@?3%yxIW|OjyiP4N7P2)a!G{Jw_)R+ zD|&qzH8w}~u&77Q2?h3oKv98!etJ5EaZYQ!o~s57#!oz$3c)2aTd&J3sb1p7R0rh{D8X8CRn=+954HaIVV3k&ssnO zQ5jgx=C>S8TG!uL)Cosb4H(-S3}?h1y8@Jxsl7YTzYxE8n#mY7^=@#MTj&D%TQ6D6}_YEu0ORB##Hc_&rF1 zO^8XaQH+opx^Z1UPKp)&pxGG_s@` zr*dytg?pDEW-{2bIS>6z?h&G3V*q(4{JJ9%W4X-hESgLdYs8Z@^2K^U3b`euRn9;J zf=D9(f`1>5wJspLkY>0*tEWp9MyXi(XxrmSQ!b6j&frc5C;YxTqo4xd2BW`T3F3y@ zr>hI2A52CS9Fhie=N~`o$6CcushcJ}#mx~!u=7A=WRu^)Z%mVc6b2TZu=ZcUw-LayoQEtLC+ zCP@}8zM0&oZv?xJa&U3}dFzM-Mo@aFqMolzol1>;LYy{esx0M06TR}gah^_2GtO~= zo_d%cXzI5#v2JM6w%Sois5D5dXN1KKcgazPAQL7Fwj09ZC!niRj?^x0)M?OJS5%*S z!w^2auFBZJ%P9RLI0S*o+n*gfAnd@I2^41b6YhkHvBB%uK=R75?)!+giM zZEj{6+IrHM;ij?2hGsB^RT#)*P<^>1d$N9b$5q0&^no*zLPvIZEU1QhX<}I$J6V|v zZObZ#7$kW=9d|1{TT#^3#NNENC7tbCaVaw+ZfTp^Mslj07EzF-=jZX@Z~|yxyeLVh zaRM5Z!^<8$Xu>!i2^&c{1QLAy#N!VTC7fz+O*1F!BhL#f%LZU8!@pY}P@gSVra@lZFhdoIA*Y z!7Y_6pk$~A&f(T(%I(ykRK0gIr(TfP((TvQ9qTpELcx`qF(Lp=nMVZjF|_VreY}jG ziZcPY!9duXS(Yfsk4u{4T2)m_k=TtIsBS=FK?8%}o-%Rq)QP^M1_QXlqDYNaI`G`C zVpyYjnn8)O#O6lHW;`}Z;2eXGJv9(2Pa=~mvf8k3>Q?l_aw>9cLF=@w6dbT%s7S*c z@;}|iP6tS@eag~+0iQt8*;wS$^eul(v=anP7>U(*1ZNG)7Req!P)-5nSIa_CKmy6R zv}8|e)|+0_%G)Lr%-gG&=4K$W9iU3%{o;250Rxal5ie4CJdy`eb6Sj6Y&^5QxC18i zEhMVzfC$^l;CTe#;2h@|>chn&axI_l08Kt!Qd=@unk!eLnn@5m;6)ro$=Z?<7xaKI zqbs=ckU&lhbrj?zYGtO^q_a4qQsi1hb4m9G!n%tYQUHv{o&NwNjHx*U=Zr1Zrh-%s z_cG0HvaP7&lUAAn(zuaBp^h?C1!f?LNZ=OW24UkJIKJdcf_8Nyfa+SDacXeTtgz$P zRNf9@dv&!wxpGY>@~?W5@}ezR*+gS1dn8dMkYuG&1JU3V$A=Z}Xqk&-r7g`Nnt2$?m0U zeNGtCS@jEb>E4#W5mlN^uVCeHhZw-lIOlNswy!nSU%0Uxj33alLop4T@@F@XciHbC;MmB<@Dtz*J&yO8fBN6n7!jGtnw{%;YbTnp`g_%qexhXxB zmB|DIXKxw6{Xd^4J$5@$5uUr7%c(6XT3gUYZJ=hXjXO&rEXO<4U`9p($mDzw4^du3 zni1BFoZT`d==+mOiFDahJ4A+4Dh;?Kp)5hhKyN=d&sIkQ#5q2pOB&W)LCkjR>JdSv zu4QQ~K)$Taw45w_kOn*f;~flvk5fK3y97lO*wr-cKD>JDdOGQ{GM2Ftye~Oyl2SF` zHgSxuGsitgks31Wiq$l>DAu(eq_IT$bg_C;#bQ$Pvw`{yn~n(1as~zleB+=Njmg<2 zs$$39*0EN^x+OQ$WrKP9i%Pa+BY@jL$pn1(!Nxiv*67Np1QQ&{X~|*=^d`LoE3{^@r$^CHNs2f3GSM>S8`RAtOWfvKT>9=gwu5awCb``*Z-HCZf z&RKv5jky3E6UXFrPoCbS@+d#L6AM#WYF^Z(vbXG`LgpymP^2-~6^7u4-ZH#nJm;rU zr)Efk4T)o0?7Drcaa2fEw^giXm@}qW4+A?$B$J*`@1CDBfH?@b@-Z}b7bdcGD;6|& zlTGdoYN~=_hA;#T#y1u@-~z;)a(MICE>1$~QGin?xA!w%sZzy_EJGTZq*!V^alC^V z%!GrI#c{#O1M`lP5jCmJexpaM)?;fvnp3+Kklc@QkTHNy8TSBjpOMf}yAhNd+$I>S z#N}P&mDds@`lN{gU&k08PCsw*>p>D(Tk5Iww=OLvlT6YQ)KX_}pqN_c!C($SB$9c? 
zPf^JHcCwhj9muIqJJ+{fv<|kTMZ?J_`$q%@CN;p_$-wc*>BdVOglvorMm7vMjhit| z=}QZb%T}O>4NpT50C3Mxs+#SJTQc4YfhfAmcoL&#gH!im9Ao zYqJNfsMC&lCS;PlOc0}4LAp;WFb4ph1_vH9^Vb$y63vX(q_V=$qz(5eR(CBJ0}e?8 zBaT4xoPI|=aLdQ3FP7z~$YtA5dQU9%(;lhamSe{(0Lyd0+CM!@=CmXLI)Yle_on4Y zYTBtFh0r^N+|B@O2-||VAa3B~e%b0I-sMUFH3hE(HR@cRJv|==IG1dJ+~i}yUTj$eZkf_=k~ecWJ?)Cgm#qQ~`F ztw2cveaPy`_t3jQ>upk2Xx*eFi3jRnN{~=zgU=bq!Rea&oXiQSED`B#H{469G$~+i zh!Q;~WeOMv8DiP+elwi&$73`s#UuDc9#68Gb}L`)&H@=+;7fo=UOa)n`ijz+hAh%Ye_G#If3#pbM z?*xR96z2nxxBv!lGoGDW?+{llPpdp|$!a6E%Dh`u!N72%4URY&K2Mx+k%Q1VYq&mB z&1{H$TC9^BE3uzxZ%fs1Ku|+qfy*4=a6WN@A7huO_onpp?4S~$E=gAXBQ20Y{^h_R(3#wdweASDR<*NC?!6f;Jd0sBDG~rkWp(zC zX~18`2T$_F?khPXeb1nV+qIoaYt^1lOVAU?Ut$1Uur7(Xj41#q$R&O}4j6v%)s1yN zX(pC$UCiHZ#hf#hK_26i&Oij5V<4V*>wzPw%UEG%5)v7r{& z)T4${)!>5TsC8LPhF6UGtOnDbKnQdGA+(NqA%TUlSJX_+nV!6B6G07_4lyGFHYq>j z1Prhtu)Jp{o;lD|P`N*JC&_9xihES-%LIz@zpNb?pL>=iK*8Eb8&4VF=N#E}`(gz= zw*^~kIvR5J(@f zC4@q3gye<*u-%Qp{PWZ?sjj443)wQcS4^i;S+xrFUhMHkLP>3bU`GcHjE2q!2f;pi zapBRpe^3v5khN`bFPpU{l8{oY+v7rHJZ=b70J+EmK5?F=JW@|1H4KV6jUkHs#kBj7 zLrS!c6+;(iAN|aEc5IxVIXrYifGMMg9-_@!^iX>Abo9K|Dy|$tzOE0>)!Z`VXOc!d z{BxRfXA1uJA;2-bt)s>A! zBX(pOwbCMr_KgKSUr}dOLivS}0_3Le0DwMDInPL>5{IUX2BkevCOL}UD-fE1$jHp z-`k+K$^~zhqMq_vxjebCK4Cr8n+H#hPu&4oPda zFBh*2rLzM1?yLYU>c@<3Rw1yXAY-0SQZ9B6Qq{()HWO;%ymB=wcB2dJAyU5f$MqS; zGC|;fbB~Uf8KJ~ZY-&srotwz@FKWxIXfw#n(Z_j~Ng@Og0aeIck`6)fj~xVVEwdfm z72>maE=x30y$cdFQB5RPUEI8K4cvv!PBIP;S{#pF&^>T1knt%#9OnK(W5w zK6exT6a9_{N{L#4=-7t+k+pDFj(5y!{s!CXHaXg($pmnJf8VOIeWx~GVgY7&<2K=f zTUV?qY~6wjIbf?EWHm>!_N0Zg3cfcK zBm!GLaBv2H4O06YBhG0D%4m5oMKF-*c~rm+>c z5?E|t7LgD1!vIJjut7U{P;dg|`RPJ~vnb8?liRl%&1OcbmMcbp0+0uoDNuckq$;ue zlgR0bsU|nnlTCtnl}#%%t%t9%r4nTNVX|7L@ zcCS7G!({QtS3tl~>T-7%$r^pcG6%gqzeLs&Z~Y1w9hkujoPr4Q03@EWKXSt$5(@^U zOFw-)H~kn{i~AXvE;4cF7}^HW`RAZhh=`;y+*36~YpG_1xS*G6xt1#kCuo?v6bM%< zhWC59&p$m&lQ->}6mi=|w(3QP8g#ZGwpHgvY1kI_k1`T@pnf!9IUIRh*bO)}=aL+8XAe7NM)a^wuz0B-7`zmMZ@Mw8}Yb zAPu+>LT)U>k+gLogLWq-V^$2a%F@PE$5L75dt%Co1cA_a+JC|bP)-jGk-#IT;6y6G z?8a6#8x^I#H2P9sXrYam&bwAw04?dpk^8U|5OTwIFfqyKyO1tR8iG$F7fi33dTg^x zcThWxsqELw`sbX5BVYlxsK!d*fyg}}Jci|eZpTo|7p13iO8SMlTI`$P3C=2W;m<7d!u-yDy9HR?`M2-j4}Amc8;t_?n!ag z!Ynn3B&!v9Wz=lENRJp+9?l8co(U?hK;sz6Bgaht08J4SN)5m@JFt4!9<-FLY89bb zMR{Rq=9K03d}ZRI3n&kXU>kS#o-v@{hR))Pq=;?x2AqNX(ElvHD~Wd#NLK zJ;B$H0A!GQh{;VT`0Q#yEZTmkFWst^;E|PPd7**F^g$&YZ2tfND#rkTbK{<@a|*kW zTx4j*8h6`Ttt8c2eLvoI!m^1ZjPiFCe>o%M(P3$!Rf?a0uXhgV0QrbrFRC zdlMC2!pCk|C8=t85(Fvs=DI0hNjp8`1LO}RpBX(9kXaCsbSkE`wG9%}T$$(7u1o7M za{J;cc);0?1~5nD5Po_`BIk0gK_k}VMw9L#tzNPdzCw144tD~6Jo)+S(a)SHqde?5 ziT$t%yS_Wp){S&%?&uZ|q+O&6TjOzDfq{|7lb*a^)HUC!>Yo%;O{@0-x@>6>v?k1! zP^1+Jkgx+GODW@kSp0v-q<6&DIh9&2_)ut6NWtDxI~oPtX8 zpP$cMF^25Qe8Qj@+fMr`x@0yiYdyNPH#Ae&mO&!R8XRtof!Z596Y>4WM<@%_RVAB3 zF|DYr%{s&vE3TVWmSF^uSl-fr*xV*3jOTy`agaK&A5EBFMuY~uFjbBjEL1ivn1fcW z6^GgYAE}wYzv}VSvSA1b1{GpN2w$FER%;r*rKnWZ86z-LiL)Ac0D~Ai@JkMUIXzDr zwepl^2qPO7tvQBIwuYI1>7&WJbRoD5Nl>64A19*5`tAi=JKWN38N{-Z!h7o&j>n;6 z)R->ZlvtDixC9W#_UiO1%!X5{20o>xEn8M>fm#h6-52h2wtFY!f^tiEQ}Ofts<^@2 zq%wqjD_GzD8}>NeXQxv7Z^h!T$YaTz7H9kS3h72s#i2R$zv)CxVxQsoNTP5F+iLcXHPs-~bb2=Z_uC9}McGwnF|<8Mv! 
zLD`g}T8W^mp-rf1x3&Eu-1>sZ)#@4QZ*@|m1mhbQAd(oL&m48Ya65#l^kwef-`de5 zj=rX}YEDu!EDE5HfOE-q@y2`u)f)Xmpac1ptCh6fCd_qJsl_yIGkY^#- z{PT{aaFM%=GZ7@yAg3Oy47Fg?t=I)sXSKK>EA7IDVlvq|W%JL^Ll`7k?iO-(VKCUC zAFZLE|Qv{8-aczCfmF@#!Y)#w+=#lEZ8g-Xq zkh@H*-xfqXOmP&@DFYI!k)9QL6+}z*aT^Q3DLLX(XJvY@d!f z>KK%(QIuy5xofItu93$Oj#Y(3CTBYoNHL5832bLM2OsCww0eOZ70aa55M-0qK+s=K!e{g!BT8IhvC0!;$ z+hm2>NZ)jFyqi`MRAGwaa|6#OBy+&ePJ+eE`CNvmTh)b`V`_GzhtZZX>N5o=k9G$b z@wEB#(R39V9FZ5zigl}r1ac%w-mJMNBmscGI5{Wx=p*rmCNOq;l2|^qR(QyPq-4x{ zIRp=%7yy5tJ#0CcixVXQxh)wU!?R9-0DdwL`E-m3HTrU^v=!HniYd2zi9!A z(OCpU4{V!`7>pr7;NTDV@y9`EpQ;ip>P0k#=aQt#vV|yiB5}B$Fn&Dz@$>Q2@;~3G zcsHn~#+Xt%(?wvVS)F8^1ERc~VM`LFh!|cJgXcYC2tm}=txHA&sZXg{rCOrt^SsLn zmQT2jGb#o=w+A4p4ecHWJf5598mKYiA9zpP#a212SBgn>DJ0k$w%KG-6;!G=NX`aH z{{RRl9y(kY+O7vPDKm*6*&$fVss!`_A<&#QK+ieJ&z$wPVj!Ieb6PB{*DG19VwD|A zI4vnr!5BEt&%oekreXl&9HE-Ei!()9@}kENEc^Eo9|3^}1CzLaY#;5?5`93WM{rF= z{X}T)S>0U9k<2VQY-brOw;+|zA0wa7j)Sd;>&KV#mFcTb9m!WtjZK+j+aoP-BMfrb zQ~>QGEX3gBKN-l(*vQzG4to1ZC)C?V)1tGfNoIXP$!MTSOMTR!vowL2`$y`|Fc&9| zMqiKu+BY(H>MoLPM_pQXblVlH#$$@>!*eYml;q?RHtlGb`Cc+|I2}tPaul(w)x%6W zgca>PTCB0$fG?|2I1!e96t3nuD}@>5usn0qfIyK_3D}AIf^9oW)wFoKlTn_=qk2Y( zC$$)?9zEbHB18dqR*3RPf}C;(Qfnrq@x8AhsipmPzOi#qsNKcfdL&ew&m`*`p|87m z{-k>t%0S$}a56`pq|Zj7OgvB0He?m)S+bGTpzEksv0@PNo20JAeC|OQM&)C{1-Q>3 zh1zR@kMoI^#?Nu&b#5|TT|Ui~wJe^j^ClK)w(iE?D!gqOz|PMYJ!l{>h*+=YSFIyg zsd~Ll2UwCxY^2RAGC03vf&FVGeESr$F5I^Rj-bJSqcmexJGljQoiv%wrEF^2jIa=p zM2#BG6~jri9004iS%71L3W4#|Q8@xy2HD~|o5>}cvdKo)n$~7;WmqD_W)P|w8*-JA zTP27gj?zX32TUDE<54S)$(!lgtC*Ib-)Ut-%o08B(wXu4c*lS_zy}7{&;bwETRNLZrdV-W3U)N~ zTFU&Pt{7mI{aDXbD{zlwZ}%jMhXBvPTVkF+@9X zBN$!841NLUnQ(7XPCvA?F?njrLKd$eL0rb`h(i|Kz$y1_Eyn;H9~@^^G#-S{l$)a~ zmuo{5<@C)q%=M%ms9_fzp99$3008oQbx+cm6F9ESqeH5F{{W}=20O5%N|Eg$(tNMD zvM~0Jf92L(iW{k@5W4p<7Mj(Z*riYIbdwO@G!CU>V0j`l=R5!hu<1y+T();fv;ezLECKR-^!W2`%q*aa zmH{Pq(cEg{52i~9 zMV8Yjo~VC@1Z@fiNFR@mvb~nMrLzt^Pjb*T2h!oEU$!)ga<`~qm9v3kAd*$b>TKb^ z9y+jd)XRT)w9!+LMX1-)wGCESWfMa3SXU7?gt?_ae0xTA+~hXrK&U0`=t_8O6+5Z7evn_ z9tPw1z~J>4uw&z&2sA-fw0ACFyO|i~ySnX@$PP#Z`*1io;~excfmUTA{Z>6r$E{yc zzK^DfmdJ@<+F^xqMhiS}Uny9W6{l(OU{LyF0y4-<)I* z$?HbUMmxExy**i~&qmG3;XtBhHJUAoa!Es+jN>@+2lwd|65E83JC+$XK${jMB5)@6 zuOp5Fesi92`*pA&B4uWyW-A(8c5_uWB;^&PF+>mOTmggdayb70UZJ1I1Zq)Uy~W@8 ze^t3N#WXa8OCd!mn1`pddq@ECM$ky{p0Z=Y+_y9~Vd`1rpp~f6l7v>~d69i@R!#B8 zjPf(j#@rw8*0)L)A_ll7*EcRnr$uK@Skg~NYO57Y0AD8~;fKyTnkx}za68FV{m|WwtBiY4=O4Kr zx9!r939}y>kZx$Yg?d`Pr&CL>R#+iJP?vvrW>8be#z7tiGmP{pLDaLI*^adN?gV=7 z^HH(N74*!C30?^}0I|k-{r>>JRZv^00>bW3_Q-;?vMOGjsg7w213cgRS+?y|BTYi}9 zDTY5taWrI;un+M3^Yg*##|t*3%LVr*62%)rW{DVvD-M0aGy9(mK^}U!LZXa_qqtpx zc4+?Ac`r@1e&uD~?kE5lqbYQn+tp&REsLf{drY*t0`f9{`gJfX1 z_yfnDruGGiTyMymjVZx1yBp>xwKdqo=LGSEIl%-Ddag3n5Rq3?aUPn}R#RZShCq%Y zkSGiedB;Db9Dkop$a|3pNxxBjn@?FP(2H)^(yHH=OIuGpnH5Q=Bw)1c zK_C_bIKty3`09C1wq-HnH*Er`S*liL`X64`8{76k8u>yxg4p;l6dQS8-Xt1yPc$dk>mV1 z@Oo|r2tx|hB(Efbq;DdBPl7Tscp1(Ma52&XM^liCvm!G*5KF#KxU8y{V-YB3ng0N0 zQ~v<8fDV2+>y=D##F~lbxVltv-@iQicr!X}Swv!K00n^~1<3t}&&V0-Ze;-UBH6TW zw6Ri?NvKCs7XF|v!0vX=WB>&#$jCSY1LXAwTaJf_Fw44?JvpY%q8e!dQx#1q zin$eflh>{Mf_Sx9rqp#ASJ8wqCd9Cg3V>J_@(4J|YywXh=wrfG7swn<>uI#EKCyFA zyd;uD`*~vwP*;gxbcnkS;@D+7zCisz^4p zo_H7mv+#evRv1-!nZ9cfMDpj=&*H!7m(u?LsK@DS{k!#N>UVs>rdHOqx_zPB>#3a* zoUa}vnlHGC?95Wdd5kct6iYRC6?-+|lBdVLiEJpuTNvZ&57y-D*uIZm&_0#@PWsUq^36b6KU^>veUwNNrm5b2U0Jv~25Q#H(7?&$abpuqoJhrIBKh#J@=Q zXJR;JJ?b&+zpbB5JI6=w-sJi_-QAP*Yq#{S-mK>zQIvfNMsm?ERgWdi({()AHU(__Z-RIotp?y5zQFOyC`p z{8ard@B6*EH`Co)>O38-tky`h&i$9RgnBIZKl0l5Dbgf~C4EV5N2AJZJaN-#jIha8 zDWsi!Hmdj}Zucq~ga!Wqj1Q)8{Xgvv{eI8-rFTTtG%ZWKH9JGJqN{VVyLz6ux;?#X 
zU8Oy^q=FHD)nB)bY+dxyc?=3FOa;aUsxl!5BHrh&H8_o4;lH9wS*N#JPa@dTqd8Tl>A^T&>|k;u@Nbz$6eB}TH*M=jcdIZO)^ zuGl1Cz7+=~f9~TxI?J4r^m>k|OJlhfnD(bh>)2Rj+bnp5`$FMJU5ow^!8rZ;&QOM_ zQIK^S3o=Ehid6Iq8r?b7rCaG5G!YT-0VMDFg{ za-Gqct)uF#UaevF7u(Yd9M)uTVqK?&#us-xiSfr6=?E{gC|xWqMs!Gf9uwg4;>njX9u`$n@|aB)w`+Jk|>B3j=+Hz9@EEwd||wi(&7b$ z_ZT1@>JaoMh1z&iPIyBBA#zexecYEm06g+Kb{sb$jg1v1FS9K1QkKfvI%b?FZ&114 z>iG9Fk}z|R+XJp!BIUUns*Nkudhy8(eMt<0LdVq}){v+hMn@wg5J&wwfeeF;WdLkQ zX02{LF2W=hE?AZ0+R>;2Cdv0N_88Cq0KckZQpRz~_l+yW))6AHO0}6`*s?OQ46^dX z&Q0f+FX_n+@#8Q`UR44-&Ad%0WGCcJ%LhQ#OOIV9%vFjKa ztx08ZxDrPnoIWsdt)~Ok2X@hug4rkI!8rNpm20>l^3a*;86%Dfgc!BLy&_6V(`PVyUMmeQ?%py134fMpE*4bl}B+IiG~eDH^*i-m7P@JJirg7P$88Xn5HOcB!u*xF^l&o@roE!k7Xg`8U{j=2@LZJi`aeQ@>KebzT zj8+yiB*kFnW4CI6!{{V~J zagP8FxiOQYBr-99O{YazPX?cAl?7G+HrD-F1d<2pBxD2mB$LvzaT1G?#Y3%sErLe2 z*EJa`uG=#Dppw!t_JR&bQJjE0bPhWg03;Ix7W;ZA9@5#XsCx-ml(R^(md0}2V4iSK z7~`qBiWMo3At!PQu}fZ@*Qrv7@GWZ1AWhPjEr`Lw?+1c%KO?N*(fVZ( zL5$7Fs>f69ZZuF}$uvHhc64rit&RyGpV)Ep(}f=7+Q#G(SkxZf2_i~$;Fib$I=85x z1MOTKvi|^sqT4$I_P88ulz{&kzzm7VxxjjJG>~V$DYVRy@#XqE7H@X%70Af$H?tXGe z{{XH$o`rgVDvrz!CZ$a+g|QWO5(^R-_M)+4fJQI@B!B?%j;TNji<2E5OVmV>*EXH3 z-PNYFgkf1dNEOJ*0PVt`{loHl&yj3K$CLB8p2clO{5pP;i5!$Hv6fU>8aYH;i)|r+ z0Zstq?m70J4_TWX=t~9zW*1R3swn!VoW|~=(E!T7)MQn`+<}j@U?-FQBgZ{LC=O-KMaxaLEpfz=EPV{pM+B_Gab34OkNsB`B4Z9vd z0Xql);X%gHfO+emBFZBEWY;gW6KXKWCZJ5SA4x-rhp1OMRb>Hka^Hi;j~!KVYG%lk zderh))gR$&wSJ)eUe$iA=>Gsx{;z!=`hjyvtK0h9c2=VmQ>sM+*JzBgI=9=*sZxx2 zKD00PwPZEDSVXTu<$?6gC`cQfis#ZE#hx#sbu{LZu8-I~!=mfmwWrjS`f4<)uLV6e zMvyzTK@|9Q&#YZpy->yT6w1K;%U39hxX6Woj0)T zcF%bC6(sH7r`@;o>rJuhY+|Hh!b%vCXT-^52oxnN2TIV2WhYt4E{u>cL%h zn3qGK6|lWcywhDE0hiKLn|3bDkLK2D)_YuCBYJG=Nr z`g7b}rKI-tU#dS;^e)qG-qyQMxoc^sPIm8J-PJpR_35;`a?4zr*|{yKp|@9MfnL>w z<=rKWr;~6tQV!>;dy`kxF4~V)o>_&Pa>-&~v3ep&-HRSLWZFw(pK6?Zj-c0bIRM)M z5kvg8)RyKE>edgcL36c~oDb03xnKEmK00m(?+|`W;JvqEYA`LR1Um#}H>)}@n7%@Q z4mJ~mj{~Jd)ZE~<{Z55TS)H|wG-}nOio@2Bp|0dOIKjd25AWx!Uo5w9W6D2B43)ljfzw)DLkBmC1<)HNA30OB|yS8)=@jspO2!;b`feDxQ>R~H|Y z=GW9iLQC{?tCl98OiJ#pGP4OJ$tU#=0bVdM@PBTmufR%>7;ZPxAhoZUCBBuO#5s8* zQS6TcAoKdkcali~45Ev9V(9fqH#WrC6hoSSQj~9!em_2`YL1-BCKJNct!=>T*d=wTY}u5vR+ul~!VM z0Brkz1LGZN`9KgRT#Vkjg}k+45+swTf#YU~dH|%P0l2vHpMZWkJOoheHw@DBy*dpc z?P>2MRgf{o++l$oomDde|1_p8f;A0?u{PaTJp#XIo z+I5ZT(0XdMBavcnOpMM5&Tx1c=Yz*n%$kjj&vQX`?xK)t4Pq@TPkqPIbu!Dl0Du^9 zFb9tx9YcX8<&K2<3NUIgQngNNT5UNM$ekM`DCcnpBr*MmT6mB(GcG_*`--dEs&!kk zwx@Pzh9a*Urc@E&kMGGj1LLUjH#9Qpz0F}+q>@|quS+Saf;kxhUs0G5!si(Fx1N(G z_F!Fp>6+H-$zGj^^hA;*jH!&s6iMR_JmI?TD#Q6gZuRb1C~2oq5Tx?K3yEE3u8{ko z!utZrpZ9UdJyRwgN-7{~G-NetNvu6{_p*tWLQIT+m2CML&wqk6m53BwohCBg)F@iFGZmhVkxt~5zqTz~>#U=C!6!9rQ7&|LQSbXu%`E=}rcLL-F zX)ux*g(^}i4@-DMoCA^p{{V+S@akhN{{Tr)E*P4PT}ok2Hv6W9!m|YQ#AE_6zd1iA z$48haQJ6A|feEYW(_Mn@nM`~3L=ckfbMoH<+&?`Sp@R*>;j?8Ug|4g+7TqGOD}_vF zIX+JS{{Vikfwx9WE-(d%zDpHr%+td(cPr$UV634R13uH|rX<}NFbOe~cB;c&XGKct zv33&CiS9Nw<{Q!?a3J+5O&~X@=3_)!8bdJ9Y$J*aXnUeBcfQO zZ)olyIvy~f1GJuelk?JK$wyL{(2;ZMSC;h{?uChUG8b`naw~@Je;A~VmH@P0h?LO~i403POa+L0BVqNM~gvWy(zGmH`x zeEW}{IXz3|o49$IKc!-}ptK>DtH@zVE1-lTqyyo+?rf?2XFnZHlkF~Ifl?KtLX7qhbueHaRDZgPeb_ zKo%`CvWnzn)}mT7QjS^dS5k4&FkNr#ZHm)NAK0K;fl_0aS_r= zm7@j6(wxSl{{T-CDNto%PV5u8xCD%FKRr+6{Zh@I)X1iOwam*(GWYdGz(HesLM{gL09yuvhF*=_)-A}kV9wS^#}3hoLNcK zh0O74)M-IxS{76&-ce7i>=~1OGD?I00JvmhBjc%@exg2J2IBhEUcR#=w!~L88E22w znkWJ@C=7+9CH>5Iu1MqBxKV@DT}Uy=W*6#d>djFtD1sJgnb6EOr>sa)8OpBWzr;8s0D@;;Q2mV8#Tr(GHi7<)GI|sp0#5_w-gpPN@bQ>#gK;f@T}~7swnU= z@ID#Tu!;%;xnPDFs$H*sTT<1dG9OmC_bHY3v1k2QQM46Q-~)_#$iy10rgqC_gb9wJ zsnNS}>(#kUCrIJ-+i+mJhV8iau{*K&$T{Px1AoePa#yKE?w;|{J1UJ>t|hfZuQj%k 
zsNB+fOPp@n0T?I8oxhHkD{<&e7`#K3NhYFeTI~~iPVS0lT4l0gSFFCC_IVUClFmpR z6( code { + padding: .2em .4em; + background-color: #f3f4f7; + border-radius: 5px; +} diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/concat.JPEG b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/concat.JPEG new file mode 100644 index 0000000000000000000000000000000000000000..e5e69974b839849b5cd0844b06cbfc90f8380f6b GIT binary patch literal 45505 zcmbTdXH-*N)CL$Eh=_^cNX3dY8xmowFl#_G!K6jtJpY!Z}*nikFptHsXMh2iG zM?jz>zzf742kC*19{GFxy&nC096$E=dgA!;W5+p8aB%$3$;ow+gOiJsgX1LkNv>0W z58&Nt?o+4#e)!wT|NiypiDSo3oZ{r*{GTTOpDyhGKzt`bheue)j$8p9ECpb6(F}Q&SxU+!x$BqF)p8zBVT1NuEgO2l^ICuG`F2{Lu zXU;4B7nEKmemyC1r@V>ZqMszG{50Sd*C_!(p^L&&SEa9Amr+qwQ`fkqsdrc3!0?`t z@uSCXvb zDk`gR)it$s_027XB>xhIYbEW#>;CH;nTZ z$?P?ri-~33V%#FKBtp68L*6f=TE08>Mm!;ojo*wJ8=5qp_DJBYj6E=>TY1ihT*p05 zdH5fAARYlmHg(kRY4S2I@HRud2jM>zbk5r5R)i7O*q}L=V(Sl~#2MJ;&4|*_!28t8 zbVG0arMi(lBu67Ldc2KTU_Q9D|Le-wrkzjwhj*uU?iZI&=9tO$G(GvXJH07Om!uvl zj?EiWBj{FCRQp&7;tKV4*;>|l;)_zh-3OrxRpX8i$X21Mk&w-E`b%~nJsN7cxmt2_ zBe&!t1>d#oNc-N|^zn3rdy=LqDPqQJ*ESVOHeFFHmXSk3BRR>GEY@uMN0;6mhKvXl zWL9Al_Q1zNbK1mDkkwJdIdPO58+3B|&w3MWr)R@T zupFM@DRpzV9eE#8JGk zePIRKtwgCY`Q`zUcmD=VJ!?%&`pp1zI9|)lW5~K=)!*ags3u0kd%noFdR5e$eeDMf zFU?=Q(JIBJ_U39|<%Q2;TpHr%mAkout?tWyv<{*oxjxO}vF}H;!@=e)6V~s4dOX0C z$6MZV+@0LSl?)nBxerso)oqz?KMMrcpFu5-vq4474GbfYm2RDdZ0_z~98uUuRU5Jr z<?bFguk63pUL>@9XS38 z*3c=J-mDg3@~+^na_5!ORyoqlckxK_IK_=xN|J&rBcR)kL-CM?`oq6O9DvMj`>ga7E3GyylAC@>M;_E9nZs;g1jk+$%shx|1 zEh>&!>-@(Cy-oc>OWVz1g9zpQLG$wBjD{ObHi+g5|Fgb(XsX=tL220@5veGQkEITi zeo(yo*dTXE6dQC_M`RVrr**rGaVKlv4+j;5Kdv6CD!nYPh@LWXt*PwGu1&kh4f#nQ z6qU>M)rkuYJl2z*F*wLTnN!!>&gT~Xj=wGCo43r|R?Z_|Z7hVYqfLuMkhl&yVG>zf zf_w)#(O^`#5)3(?+lXRbS>Ft?tvs{zL4rU_??gNJYE}PJAQ^id54? 
z5x+|2j8SC8?sDhwCraQv4xHf|;Zv$50|2#9yLz zkZW4R&qFz=SW6P(Q+-*%xBK-hY{9$yx>qe5DT8j7S8+N%th>-jCwXUZy26%-B9T?oA&REs@LD#v$JOT7e!_SD17CODIq0lV z8^fG&HPTF;zdG${i@1Mihqyv^bsV6X_g^RT95oO5jiJ|BO8&5@s;as;e2fp~3Ub(|n9ubD4py z)=oUt&{Nb1%SO%kkN9hY9633;Y>-Dy%;|2vYf}ytVF9Pr`1_|*!5?Pur9O%*({1qW z%w<}Bghuy;VWKy7h+xS^Zg}WYgx=^G=gqPJ#n)^TK~n7rM`npDeA4u1jP+Vfj4A z)jfc3kR7F8B91WpsQ&Z3LW~P!)=Sk9!VL2KghzVrk=)G7$rrPe%{v~;y?H$$dVo#G z{u}C}^f|A5v7VaX7(^3~6?``fSU|N3OqmHwTVjKV#*?XYZ$I0;Ioq~m$T>cr^otEj z!K_P`SrGqoy3t=Uc%RiVB!3C5NZUuhYmk~NE`79tFr9CL2{Lfs(G4}yl>46ZH*bV^ zGJ6>)J+fgdm}jMbT2JowAS;|kXkbnRElph`>!(!CT~u9#|CFH1At?hGELFM9`T(sb zkRh4aA;>uFq}LZBXM4r%#=~xUR+uf>(ay2fnGVQx8MI08i;`zjerd%)WG@jO83@j{ zbCHI3D7TD{nByH^U`^pNA`6lw6OEjZg0F+(@B9u8;;jlx?50mwwV622XuFv7&kXuL zt3@ivqV~&?#k(?&n!Xc+%~SFAkMN|_HmUxK zc}>(hR_Dsk)y{Uyx%B>mNkQQt*RpKZQsggvxG9(+2%|jtn29-#kmVaCRH!tl53;VR zmICqWL@6^J`{^y!kgRay-Pm0te{lm!tKqih<$v2v9Ume6DPsIPDoBzo zVKl|?kB%7QAD+*k7vpD;JHZ8PP;>{Pr)4{ox5F#&-!qvm#KG9EyjEKYHYk-0Ab@Au z$nNS$H+}e5X@&rp;{Nt{Qm9DBwUY6GvB-DJY|t?oAvS2HV!EOyRn|-E^8CNQKA+DY zolh90J4(0O3QslY(Ury7pv7?=TsGb^nstgsS9L%ZlC0ws47(65Pn7}(Gk)V15UOm@ zYc#qUL_)W23Itp?^Pqi9j_2=suGquaS`J}7ZZukyE4$aVkg(L z(y-dhFOZpPOnJe^e)b>lu?E21i3-2nt1jo|Lb^LP_@vPmtQzkWO`;xUy=wHbRr9oi zf5^E?`7>kFBa2c>FW=wirws#cgj1E(G~%!IdRDP*G>zl_^A%0or`lmdZv%u27-Fda zpkNJTqdnQ6XKWB^>)oGe-q(BiJbRF_&uq|sv0S%&#{$WseelKL+HeZ~*`M$aZ9mKp z8hM9|Wp`eUvv^Md>i9osb7iIj?(Us&HE-rjrliZxuqhP9Imwib9u88z3$A+I1}HzF{cs}0CS<2)kp z?IwyUtrne+^wcFO&R6IBrbJTr3*Z^}{z59m{&&G2Z5iD)+!885aRtWd3mih)rNUa+Aku@MJvWTX z#7>!Hh;)MQ##&Vgr25-0(Y;aEKq!rL{Yzr|^j>`sg3p0*0{CNfd%Dy#0Ggb`UN}B7 zjBP15EnOYEjcYFufB!5c<>o}mfyLb}iMWeL4PF_vZxj__yO33>O}yirGt!%ev)FeZ z5G~=b*Vq!cs*3OIpBX&5_fb_n-4WN^yF6{8Hu@CpVU{ZAX8rL`OAyjrl}ZLQWP_;j zWmE}D3#ms_?)Pa7k6i9eUYZB8DoA)Y(7~c@O0 zgL1cjbCY2m2-9vT&i|#w0^f7@ylV#^oG_B~XlkfYsw!LgBJkU0r-gEp`PEA5BOE+@ zL6Zbe2K;yN+A7imDQ6O)}js)4VQ@B_)DmJNN8BY z@*S~@%ZMu&N*Nj3O5kTaAS-UXoE`s(zqH!U`_7KL<69}}T^Y}uQ^snw7q=>|W6UMt z{#du)YtMgPir|V3hyMakw z^1}_c{c}PGwnPTQzUgKQ zMPXb*L_@lLC9m^0Hk?2UEqi>4hi`Fd;Ezx($(05pcrl02X^MSx+n99=nAdB}?`8HH zrM%l0aS#ZdG?bsa@0Kv59}=+?=6pf(PGPcSr&G5#S)_g7Cu!1K5N!xP#O`5f?so|n zv@C*(&?)#`+j@yE!0Icl zgNhK{7;#l7Czba?tzRApGLvWo#|pLQ$ne8(3d{&>vJfoA+l;xs;X+}#SiLL)MowLri3Sl+PQaM zfA3gwQ?L1S3o>RsN9ed#rlnWF2xflLQY~hKnmy-!S+@lX98OsGD@N~|{CpC%wf5h* zQ#;~CO;u0G)zJ62Hy<@_9e88Ky#>v4C0bK?*&tt6>_z7H20m9Hdtts`f-tZaNY(^OR~6E0p*x|m`s zM3(re@l6Faew&h$YJ;*SMZc}uggman{AaCZJr?;9<58K>9PTmrqRxd^R$(drZjzrB z@1?GH#0Bh?0hX|nV<>{|#0I@7x~EvO$Q(v(YjpxhBP+`d(p0}4r_yM_eajiW-jjOZ zWPR96cQ?+`M8BEn%n~mV4(_9$Hy|%}$P2GK4Qbt`^Mr~M^~k|*c~S_o-mb$Z-p&e^ zPw5AkQ7{;hl5)d$^U0USe%!xnU5EHRrA)1xLkdt2=H1%?%hvnqjnE*&>)YBfZhSQ_ z#s9Ho)OOb;X!qPHeEJXAm`pehHyKfAkoHz(sGoK05KL7}^QQk~)DP5C30M<%a0b>% zE3pk2Ou}Lw{3=8_)6+P)ilA2%rO2(Dzx^W8)_eY!+K7gtLVn_W(t$r>?Uq&-PBE?u zS*`0Xr_R?Y&D3OrO0cL+gyK3xdJNwW25_u4GR*#vx82mF+h1-9j@jvg;t)HUJIN+u z(_=P>>6iOFBZa5SX?-*Vl&Ol_StwGs%SSD;LEWDdI{{G^aoEqQW+(ywle5E+`7H>| zQ?80i@QBbTo!VK0>6~PPHuDU%e`G{Pa5Fj@*V3VH-Qt%B{d)io!MiiU*`VT*aV!Tz zC-ngGXT%enOrPGfVS~b)_=3pp&3FlPqzO~Whz(jU3Fs8F!%@lkZ7@l-`u0LrarX{> z*d(3Vlcji)b^R`-5OINVGi3^*Zur;7q&Qi#K@HeYAecRb-=aq5Af}x_!+R%P=nkBg ze28_{S8E(ul7K7tBbguqtwwAq(iX)cF9N{HnVGGsD1vNyz!C|yv%eiWGsJ;o6+sVQ zI|1retrLzC#;>nZQkNQjhkuGYU09o2M(KwgfC(z*`iPh?XZ;Hgk2oEp--QGBf9p*5 zg>w#;ZS`0=$yJbbq63q5wi92BeuX?z;rqe{l|?lk5+IP7+APHp9dT;Q462ov_qIIi zbZe%_s+!hl2GsWFe0Ik?vt}# zLo!^7SNo<$jhNKDcvn4Y@u|MD+^x7^$Tj3l1%}s$CCVBiQJ3V;sn?LBUY{5@tr02n zCG;;K`JrW}#ayMyWq& zx*!5$@}`$mb)zrGvTA<4d~Yc0>cGav%|?K_H@JIbPhn8ZQ`7g}xE&Jbv7|=Ntnr z{1Gx{dOcp9|H`9YVoYQ~`_jnq@W_V5ErZiwA&#PRo@~(R0?Z;geyFTBR}p{xr8|G? 
zrt6ZmxT}kQ&_NLzez)Aa$KLBXBqgAWF68)k0kl4T?!eoow2bq_=1*_u@hs)~(5v2$4{SyAvf< zKafk|Gy}5?XL3>5QT3$N33~mUSyagB)D+xgg%C51&y@^p`bo32id_=tD0cmMZZemPW_#2TP+SD-|H69=k=qiLvw^!ibL> zo<|2+tds9T3bwW{RN)c7&d*(&Ak^f%IhN@)eQI-oV`QNHVwy?X*T;tAe_VxW=CJLj z^|k0FeGd|-HF|NsAX7JGLcVb=N0g(dpulf$B z-}=zLZCBRk%50c;qnE};!|lSJN;6OK$6~Wrj}?T|uJQ^^j}wx_Ysqr=pX+7ucsc>B zT*5W2_;7=yXMrN0G0pFmRZ`pZ%%lTW{rQd2Bb;k62H(M)Tl6cWXQRn%PZI56XyqNZ znm0dh5YV1iqb@DnV;(M%h1nsDiT3Sg+ z*4c*17(rxMXdgf4hN^_M(H6ri9PuxI${6S}-eE^H1XT)9-OCSdE-plbsUq8r4KvfM zY~l=YCdj^&m}}TNEmC&OHPGJ^+Xka*bPY`+##lmplQbyVs{~jCW?{%ThrB8Igmb{6 zc0Mv#UY4>#!lgyc!!cYX<&utsIhP{h%hnh(`QR;rilh1o#R!)fBe)_f>Hs}uC|1Cr zTfL}P#iSkYM}FHyZszGIQjJv2l!WqY69GoEIw*Xk#R8WA!Rf)ksej4lZes>5tkUnE zQN(vwO8uDxdB(Pm5zsQF+_JXe7VjU%N(tkK5oEC>(Gpy<-o(I2|Exl>>M#}bv+~TN8r4ivT+4ql%bG1XZq~k92;a# z$L?oY+V_`e8Ob+Lo^{=tNB!k8yN!j-?JA9N&*>d^hODjp4aGiJ8d3c>Azj2j2(g})+OzXq0!k{lyG;8*96nFwzlG+nfrZTS@@!3+w z7MRvF@S)MT4r2EBIu$I)Re!+JL8{PgU`V^V*iv58SJ zq*0Zcb8HsdB<3>b_?fgCuDy`u09}7tig^prWB@9m=6LdM@_t#%G8}6llsk-diXxzl zJ+v_eI$H2JePp`xd*$xd<|h1SX)bd*%_-+XeMhT%66+i)x;g~u9^1W>OF;FoK|hl> zhI(XocO8XzinOPz6UdjohSewB`-~ zi};Ay)ViD%O1)NyH4>I!R%t{h><1m{&LYpRR`d1F{(zpE@U-^tUJA3YqakVM8nRfx z(xaLnE=^68&rCPV`(q?pVaNu_OC}v483C!3+~hO1**7ohK)L8)d&y(oCXVU_=#{YE zR#>!pmLO4bP;s`#q?SQY-{=nuCKGQVXW~t|5aZQWJZrtz-gi`&8mi?Rw_MG>P ziLg|3r}Xv}Hpu#ZJ^6(uG?fR-s7&qIMTS6?txBXUJ7qDSU62)PPuFU7zwO@k_AdS$ zPi!&i<}p{5WpeCjXkquwO>7Rr&y-(4slMIpNCo%GS+qIgE9WD6`2fUBR;ooKY3oZP zLDA7yhZS_%g2F^U7%kEWjiruWNmk4=C*I~*Kz}T0L=;2SAELcWpF$=9vy|b8=NSqx zbyfjFx370Tu|edkXGoUWpM?aCIp#)|rTKT(FZ!J@J01Ex;B$GK^kKc2SMh8JO*H5^ zZdO(;P*a^DnU45MAI(YNrMWF4iwhO7`?8L^CX_I(TEgN-Xal5(p?*bxp|&t zi!2!7DEvZ&wAB}6ep$2EBG48U8I@%*L3!$4v83}Y8tq#iEg5rX;=TP@$un#a-J3r$ z69Ah$Bk;>50AKZ(#s90o*1joZX&rCP0KoAFfgGsp$`nr&@$qgxu@vAtZCUY)hEkEh zv^y?`>nlOd6i%O-vF~oh@8TxL!~EZonQy-l5gb+G3h6P(bBz8MD@KmhRgfH+YuN<- z|8-x#2~@=b#sHF@zy@t;67*$vu`KRYHb@1-nkUc`%0#=FT}Z`WVj7TxcMLX2Vwy06 zM)E^u-4U%$!sT_J8<*co*Y^2U&}6==t#GoA8J&ofho)mlx@=Gk<_VdQz&bhQ07_56 zz{)(yslZJ9KuCQ4&z5nwFVzs57ZfU-MdTLTp}%i!LH{sJVogGhJYg0PT(;j$ADQ~; zU^WNgnrtdC=sHRF`kFHQ9r?pw)RbzyQ&>W6X||IP%Iiy#AW2b8dvTI2Kbr*IzJia%HoV$WPXC&6K+U85W-IP)>*5+R9UC|`x9?V!sT+zk; z@|wF*^>y<)WN0}G8doUmk+G|^=g4W%Shb7Y{J=NN|DAKz3Tm3{cVe=1uC4Ooo$#0y zYuy3)r;U@v-C7?=M`$Lz*iv^wbXDm+kyvXtk$>F6G!s{yki=U{cYN$ z1i4K9aPHvb{Q6V1a@Op;x3K(Ab#;VYQB>@Ur{yVX&rN0R`zW&?`*U;T9_KCRW3IZb z;su7bMs(eBJ1@xoq~FcF&1bKPY~k(&Q_XBh!5R7XlMW6xx6mWAO1~cm#WMqrFk~%| zf}?AdU9g5XyMvkkzWhN1%{==`s(H7M0#VDx9s6MbOPsP(lZgUbdzjYbw1FXxTCNtCT<>mHa7d-^H1JtFnoXr&mf8QM9%|pC|zE2M4qlDX+ z)A5$gbyflyZI8VC{rt@*Rko6#QUWNH)_Kx-qv378@um=U&CJi<^w@XKGGhMihR?*- zm0N;0fv&67L#%k-DKt;m{l-M9gSu(Y@YDSbwllm@hbE!OmP1jpO;1ybcf@_a`Jp%# zZg+cA{p+9A`mWHJ+uEBV?`y8LCPdQ)6N2o8GEc>Rd1m`eP(JUpDI~zy7|N6GHHNLt zpu8EXGJ*#v?A?Zsb{5#9J+=m#8DW6DNb>)%zlheD4kJ@B6K7c!gvtu(2#yc^ALnTfbm`u>e? 
z@729=>&P=`kz!jpJ%@W=m(3>1*YHE51J}`){V%wFIr1lb{#%&`bjopD@BH{HsoUS} zKXz!8_HNo=f0N(!3-iL|=CU@Y#$K7`$4ps=xt2K>jjVh9QVs5^aGqM>;t)FnG8=IW-jeT--;_%5Zm0=1e8J+yldZb04A*Yjk?#+A^ z$||61&3tju`eGr~+eiT6HL2sud7V+L(|zxCSHj#(Nq~Cyd93bMq^!ZNgU8XPFjsUg zCcuU&YpKfLdalaqoSOf_M@UekzrBT}m}<{Pui&V&=P$f{!KeVSIaO`V$8XD;DRs|t z9@dciLX{~5IBg{z)tZjpKi;!M6igw-ey>kraNnZBj}0=xU7d2Er^I2}37>@;OiJ|q zFFc}!K0J0&;u!cX;y+|l4Mc1M$U8-Y&l21VhF#Dw-t`abc5sn8d+)02myY$Ba@R0; zr|{b?6TbI#UaWMkS}1PYVFA`Sw5a3ss#6xK{Lo+jg7sFkkh%T^(BIQ~u9qQdmNVu> zsGfe?@W*8u-26k!7`Y+wjku|bkVQ7N??z@&x=4V7blru_4coFos~r|t%-rKCo6Sjt z$4YaNU&XSu&ZXEUfq?C(k=<@4aT^)Isz{T6b5lA=Fj+bkNWB}`*zr=5_gfKud2|$0 z(2=9j`-^9|s9>U#k@f_pA!U364;=-1#xPZ}zugc8<<|rBCKIB>hMBV?TA%Wn69H_{ z_$`^?(vb}LW+#1~Rv7qjnzsDV5lZ!2oBoX(Y9J`HY`bm8-qiDyHc*g0N$$*m!b)8K zas(2YEb(-`{gI=QRA%WiK*ijjE2*vWeQJ-=v@JWwyN~hQMGg;_a*X`851m!EZ_l#c zMlOC%uA4y}p(f4o#>l}rXSCIJZHI!m;I*Zt*?)23U{uRc$L~fnFD*;52*6Rjc{VW5w8ZspD@dYP zb#1I7d>z0x0gO|V5?q$9?h%YR%etoLUP2wYM;M_wxrQT|$|yD{!J0Ty4R%BQZbk0@ z#?uvM8{}n-g9c%^J*8wz!L2rlIcPM5q^~%-ZBU7%-p6g9^%i&EViYQ}dPM_}<1#cD z>(p&PmgVRf_vT0!8<)G(++&0G93zmHO^@=f4d(H?MdW>3dHmV`q3;$2 zavZ_KFo|O6!dve%p?^&bM zI<8#OTMWXX=U-ZJ`<;_uQYUUG7br7WPG#(26I%P?trn^xQG?&9Yky>ly4J?Qrf^rX zCrLk}Est8Hy@TwA(w4=ZCn7qL;0AjV0FyjSIh!=!9z<_Jkn+E3wk3hS!oX<&2U&tu z?8j}Ng9nn`N;C^TrKlkr0EC~POwR!Zh)x1i)HkMQ!90$h<*Z!;x*?GAxxauml7$OE z4bO*1nhCzUsS!g(`YHx9RiTAcOYd)?XejrzH<%PZ<=Ej)ugylYAWFus@5j;~-9vTu zNtR=}0IVCxs({c|p%u`o8pb)^S;h7=#Rg{*K9r$cW<>9Fi6Qn>)+Vt4W9Zgoj4<;E zu7tSh3IHNW_aP)gVLxB99285lcjdMq>z|Vsp54+>KB2B-(owG?bHSraCOfq=%lga8 z^q+w+^$-lu&sbQH-@5LIyenEv(E{a)*%|!BTu6?0mV@{tTvU_?aGE;nFO6E3uqNAG ztie;3M={dEDl&aT#rvl^v!cYhU@0B9=4Ea1y<(Zeiw!V;psYc?Cy7;W%{6DnLEW+c zq=Mx3vPOww))BuHQqNL&;at znx~eEEZl^wAZ*vDk> zAy2)Y_YxGb-jiaBT*NQ~i-4sabFzeJ74Q$ajsAxK%-F%xPSRoWXN;Z{cGy4?7s)2` z-rOind9~>t)Ik7?f+qlz?6j9Kz*#c*O=b-#Zv1~XAIz)1Xubox^^=$k-}<0oFEn9)w& zwqnn20q3c7O#k8qb7+3t-&E}X4JKpzjT!5J5QoTL(ri%eHfz2(8jv9X)4gl>fR{N3 zf&8Qyvba{sHgFMUCH&Z2cWiRZz_zWh!I$5E9`Vd&JTcm{m5gUXf@Kd*5i;MVZ%vLY zH7|b6QsX_|J)+Jj(=u_EcNR(jSF%BE+|YRBu?8u!SAQSsEH!-K(DFfIx1_PFSrL9w zXU!WWT^QPh=~?NyT;jH)dB`vWGVYnIQ+A=Ulvtoh(`eLVJgLn|1ap^=EIF3H)(%y> zGF8&=m@J+>Jx1`6oh48;tY@TZ+F?{_tMa*@RV&X3RAiZL=&Rucl-!4CwHisjzLS1V zdgBLreO3-;Z)|!Js>1Vn2%D2g*(@JySMM_ZpyLou%(j6&dl?soR{byoexOkje`hPw zPiklbs~`}f_pgaY(x2{N_in!VJ2M6I=R1lk>eF+(?5^ z(G8`g~ZmKy6vMa2bUvnhhKkpcKjd7 z5K-$i8FaxC>S?ZZ00j=0GNC)=b&m)<4Mbl4jeZNpZQ_nnc8F!{O!qlX_N|J_di z7NL1)tg!9vH4P(d>jZ?_eJbi+t9M*S-Xu}6_5`C#17lL}exK$W*SIWZpi9v@YxsP4 z6LSWC$&;m&(A5I%&#oORNj=+nCFPv&ypYc>^bWE~88Vahkcw@ugojWGGkewjfBXja z(ww>Zhu#D%nmy$7+Et{=DE>#sjvCH0oV2L^`P`12kIQ4`Ok|yRbg!TjerleHxd)!Y z_LCaaHQc%?A6!3x7|%So{Oh~T&smwFfH+Ohf+^1~mNMC|(K~1A+Rpru`E#eQS!f8Y z+;lomH|gpvTVsR9dNMXPADX?#AxAV9WiqL+xwWsyV=Xk*N<>N@q?*{I9 z&en0r#i=#n|DOG7wefA_Fm*|N;#Fd(t)CmR2eIhYh~-Y#xRX$1ZK}Q1z%+AVqLA*8 z^Na|RYJT*L_oB%t?%BU0Z>2a->GtB0dQL<#jK!r#x1;K|wP(zh-gc--IJ+H~-xU#b zg6nr=F;G%E^Y4vSWvYmsQfx4Dn(ynou9I1!LT_F|gURYsW@2|j#x(YAfS zx=h=!5fOccclQ1u*QvqWp+!pe^c=>8)~%f-&HMRro>XXArrGlAb||M-fl*(?zWIss8+Sc`cQ!O$uiwCTqJ)UO25i%KmUr<+mP_(y(@&Tuy{gcNV8c2UKn|X zVL}Q@$rCNyF;#ieeaxlE=-?wum8MRQL^i2wA?ec7Q=#iE7A7TBO)H4|kc8==jh~j` z4YBBF*r+*0UYRG~y7}HQ6=cnzsM>GDB%CZUKzLxhsW64sR&Xm=& zs2L2BRaQ%OExbxj_J8?UQNQS)>}ivoQI>mXWpC}_unDelK&bOX-;0UkDO(Q=ymwE1 zERsyG*Yq?<;2XEX1nlqQ%A)whM{ebQczI>{vf;O4f#;*Y-aq-2{qvKkTY%<>y=>T~ z==1L}`XcWoH^oN*lH6~PeqWc%!<=>3e<({s_4hdLH$y@8^qSrE)xHF}?hx)GPXndj z_nfnvE9c@ShKwzO%tS-aX3V%VE*Rb^AA+Tf^|p^W5~H_1#K{_WN&m|RO+QZt?+;Gl zeNv%>etEZw`ZqcgvMGAWLcIurUDAJGp&;H9jDPxdu2REZIflB}R1M3IhDMI;lzaj| z%tJ&q`e1HkK|fiv%F-)L524UTht;+x0UJ*Ph9N_&FAq~5wg!n^_m=)IOLh<*H1kWg 
zJzcfA+q2lS&&jT<;Broi>T@J{Z3Mff8Sv;8T1UV5Ms|$MfBzF~$cn5px+#2gU#bBw zj^?K$I0|7s4iW~6;aibZjjewGngjW!RC1e?-a|`70_4$$gkmV9wTR}j4^7Lv6<~az z!&~)z5CE95HZ;cc)ZdcR-y?2B0!|B4ZKUPT+Gf>Tnv@EIZ%U`^E~8kso$|Wg-V42= zYV|2VA@w?ajIr2e7_4rW6H*tx)($Q=yun;R?rttx#&Y>=`bqDU91xf)0DKP8ej8QJ z(~$1^u$=`x9P1o`E#Vr>WKEe9{$T+fa*rROKMqM0mbjL_z^VG-w!a&R=TWq`r4Ngk zARu~5FjNNNZu6#r^Bk3KJGN~U6JA+B7U{z-#(w?CvtfyN^dAn**t=ic>k&bnQ^!yHT$a#jUuM)H1 z@?oSso0debdl|uIQ|>WF@kWs4*@UwXDmQcE#`W6m+wH$v%t}47XUu2d%w01VaV_Kz|+>7qs=J zd$S`)uP%x%-f|F}4mTgOqd|9nYtp(|TnF@P$mVF0HZe{MSZtJeB^qiIGuR*ne*pVx zutAi(NWB?Z+tL>r49q?nv4c;#*4)jdKLpe#Q+4EeF2}S*oPwT4tnhN`IE+$3Uh{8zGCy;Yx8*`h}&vsn!Gc7;ZpcuaHeD`>4_XS-Xw5Bq2RR z4R+SMzt^D^_hN5BmOm%&no)jzuHI!9o9-?E8IulZ<225?B@;Z~@gLzlL%lxc)_lXj zLc(?A@@LG>Z(|mc;WjI8((H68>o>6{VQtD*qMo_^Hz|>lOwuw;8El%9(2URq>Yw>6 zPOS>;oAIq2SRw8|Ld*lJh!nyPwArD%%})23%7V_tu;A~1msX1aAYM&9%!#dB&;=hB zy8P|R%GOy#ZZ^Z6v0sOVP7;3yti90Ohqii+t%iTB&^@gjmO|Je9ymFg1qJ1=2D9$t z-6n~Qid}hRh?w)(^e8Ps0V|2>W=^Lrv9@B1YZ3j&m0IvXUB+VRDRM}78Z+SRh^Cs( z`s0S30Z&9Pu>WHhUUZ0MolNw0S7%%RU|ThoKNEwIrVuma%#lOF6#QcXSyZzXdJM^TtKZn z1JY{44=(*rO6~tEC%s!nu!YRLC-8T~Bi*>M(SoUNTMdG9vazo5^|AJ-y>A(-c%wcf zQInB@tt8C6gYaiGL#mc4N%dQcDa3Xi(PvTG6RMLL7hQz*cU{;SI!2%ClhA2WAHrGRF*DFmbAH*81DaBKk!&{$J()lh?kDq$B*7H zlr_F&otr^z|@12elanKXg%105VCM^d<0nK;PD4mPK>i_WO6!H(*dQM%c zt$Ht=bzNFtBXs&iYR%j&Y(9V}a2SqF9Zq18qRR+`@h9bBVkKX<)`!2j1jc>c{i)4+ z_&9`rXDPA3(XkoklXIzA@5CmuaaY0n)E{_r>8awGB>UiqtR>Dr&gsdSm|`hk z@u}WNg3ZjMGokY$zv=%CC`%yR?lbXluX_toRDf6qo5nDLjw<{a&wN-67~ zsIWiV+lSXoTJx6Db^Jlt-H~4ca*@4eKYK=0evk`|U?<-GXudIER0}vEA~c&oGTNp} z#}i^V<&hp{abqGzUP4!+b0-ZGZ;DCke3H0C>_UDIX^uQkiD%|&-6l<2zUy^U-A?-k znl-vBQrT+`ZP2+u!pF{RiWBel$z8!k4V;MTxl?=Xb=;x&E$lEuXLikUu6*ArHfM;| zV;TA=B>N}y-k?gv4_OZZp|4%7F-80w8e^dC7#fsyDv7SDPlnD+YXU3+QnL&-Z3NGiA0D_S!~NLWA^t(C_AX zB}0$ji_>wFsJ%p7|FfYj`anEixPbY?Z|RnSyH?JPg7*nA>lwX6EO-26hUyRlTpU@= zl`HJqBJeV-HB9UJro-Z7Rm!2g7UcFl`n^XTc1$_I%|#*F4w23A!MhIq{m|yDOKG_k z@?Zfp;nxB^@UwVk@h+h~OQexxh1GchxPZ6*VCMR_@SS-`dQ(r={_X$3O zZ3_Wglft#qUwM9FJFwKtz0s=jx95fdAphiMyrDj(hf@QsUB?ODwq9TUS3%@4>FwwE ziziAigfEK*A7J}Kz4=ZX*b8;!7tFCPKXKxYaTlzAJGRw8D0>^Pu)`VGp>)*$m3^N6 zoZ?B3ESbJCI0WmNa!@V!P|IR$@~+rL8|MmMr`wN%15AW!UrC;L-!77XvBRC98yrpk zPe#e)=rQY|J2#&%dh5*&;g$^NYZw=HSzL4%`pwXLLMsV}esjQEah-%_LPA56YY+I zS-VsxY0@4!GG^?8+i6=}A)oM9p{nVRULPp*SNvM^GitLRVp0IBlVbSA&Z{%dFE{m@ zgzq|qJ~W+OAH3du#D^LUu{rHd#+DZENU(ScsY@dWO=>J%)2Yq|w&4rc&r?i4wH?RI znmMvwHJ;84!LBNtZ|@fS(Equ{1R(6j>w(heo*nOi`qGYP7tHk|FMj%8z8lIAeVNxC zME4DSw#BnTW?Zdg0*Te{DS6Q9WGX-!a*ABcXVx`{dGe;QL6T*^ja*_`&6>&ZgY#+? z9e(S(dJgaDUCT=c42ksQ;f3>dEg}H;oK}M}3#_vTwzYohOsis;m4#aK-lD0AVTqkf zH1yy;M#femT~X)7=TA0 z3IrI$8DMi~eUVKXz;05(t*z2Mu!7>Ddm#9*p6+)pxYQabf=g7$r5%c6vEIsXouTsv zgpBUKeJ=Pf1v-FAK16mL_A0XKh7Q#V!@MX2uV`0SXUM$nB{|gzGR&&X)DKd2kiGX9W3vQT@TiFCu#SS! 
ze6PSSQ3RMYfp@LwR!jwtxnK(s*SubNzX3K3*Cuek%S~`JR-}ah?0E8wW+m%XHIVcVJts+k_v6Y%cPpT z>TQSeYv~rz62k)8E|mWPpdY%wgAK}&O{1ES26d?>vl?2mGn48YDT&=nCiH3L9(4V< zqkC84Y|d*^YIxk}s*@%#?Ej)yci|tp3mv3^d=-6~=6-)j9(E=EY79(?SDgvlT8pc(ORhSn7vwIQ z*rB+Uhhn-0ohWNmPh)0yyVHLHn?Ab1hqdeKXIum&i!{dCOGo1K$0h)Zdgt$ckn-+D z;xL$W_cj!mbxjs55uxAI1=biFG^n}>PYViSMKkjR@Q8kl$f#{|9I971h-CMvr=qT|^Y6qj;n#Rgoe!KMOS?(xpZOL1cXSJ5<>4J(xpocgx;km1VSL?+~s+=WBl)Zy7!%ot!uBnzWL2>&bbolV~opm zj&`Eu6`S^7j-^A>$T?4}eq~ZjO+Zr3dQ*cBe&jr$Dpo_*Gv*~Qceg*V&v?~ zS()pFsoyH_wdAQf8F(DGIm0k-{#UC%y#Z+y<;kgVBzg2_e?fxP-)HqSdrt8-{mHV+H z?!hh{_3qYHhq6A8fh^N~tjGeMTKbVl=@Fn>b~A9J6xiS8vE^6I!I?ff zZj<7ID(w^O@Df^ldgAW6-`WNv{@Tlxua{Y8=YRI83u$U4Tpn;T)G*{MLm!6EL!y6D$~>>JHul%SH2dV=9*TYAqRqW+N7K}y37Xr=^B7_>D>KIQ zR9l;m^|K$Q?MEIj5danNrS@%AIKji{X#35my|SLa>-3|>Gj6e-!)bNUk*wkM%(QEF zU}N8?j$ZSJBUydT$yGm{4HY*7=y}FI2B*h}9#`tgyHgR;n+)^1y^8(2Z9?z;EBy+URIU z)UWI7KVpcYB*GoPp-etvO_#BLs}+2Nq39H1Ooz-uJ|H=V@!a&j`!)sAZ{*It6qfT5 z;M#k&5KLT@-+(eMPXKH*A7I%kc($Q9QDMchkRa35@l*55namPgzeqIX44CSD;Jl{v z@1a*B!U*jl?8_S~$s--J34e9z5}sU_|@AU zj{!t^LI`JHqDR=!V~X~luOv%+wqS1x`TIu+JU`YdaD2jY6mkgVwjnf^FqC$VZZ*rO zdsjHC0L8kUa(w%%YVxC_ub$dWeoWB*@mWHn(FZl<*uoGMGNSO)s(+QSHT+k}se)6x z6KY?hclY=jrz6q48UrByA?xpYJppnqe5>Jg*EzabV{36 zt$(QK)H3nkLpa;Enmi+f6c)|j$2;F=VgEl|LAG=z^^C63WGKhr{tfz>xs#&>hEo6g zOW=y-C&x?mPYKe%6M!mPm^u&Kgp|R%gFNZON6>2N+OzWiJUQ$Xlmn4sDGgSv)N9Xc z@-=?ywU$1j7M@|SEhhWcL_K4Xz`2Hi~UVXxhj8y4Et&o912#D5GuxnXmlorW#CBOZ36 zwC%5Ub@DY&rqbz8n{lm9;dji@*Ge5B28!k7gN~8Viw2K8J;q%=v$#rdiyuuFaxxN< zJv>-1)K~k%FGZr)hB}A+-W3k0e@N2dC~@-ctN!kh`OD&egAXoT-RDU>hK2PY0z+k^ zD0Tz!V)v(;&6F;3SPgkR=qu~4^EoSgKAK=qYgxMdLSwCty%D+$Jl{}AR0+Fpx{3%m zH-QS$-5j@|j$a5D;1yHJcd$Kp;`kRRJ;WV%jNj?y=n7`~Gv(yUgzq+Xu5n*Hu~u^K zf8+1@wl(&@y4ZXb@)q?FR(8C#iz^0F))%IAW2NY{y(oD&m2wI>UvFTCOfTg`5@zvQ z=hH1uhCUfe%OIo5Y4XBIF&KZ-p}CR( z-Fn;U5IhSg=h_KUgx!HObI+(h{= zpt`IlYC)&>-WG+AhJaaPpV5^5{n~!Ni^qDHSm@7qpla+gNG3w-hh%4{4%3W0i2ow0 z5Z9-Axl->?Xv23DtFLl~t(4b7S(~Ltd@~0B+HEcf3pDzlUsB z-xiu`3uB!7nFK(tVq^9YPu7}0vC=piOiuIz-qL1?y{#{uAGz=uw$uAl60V)-chOWA zy{+c~83M4PfCguuz|PBWY>gQ)j&JJcuEy!|v3zkYUS~oclHXWIq{=R!9I6so(g47w z0Unp|1Tqtk;X(4#eVY%803Ug>GUix9zP5)}7#kye4x|e(qWQaS`2AziV0hlz|F`#0 z-qqnnxbF(&-4r+EWJut@hd4NN%HkO+W{GTx*m3A$hcJdZP&R$UUB4E$iiN@i($PI7Ql%w~OBevu= zA%O^4xN{bt$;Ey#i{h@iFaJ;p-d;RNz*IN>K=Lu5k4<>e#(si7OqY;i-`MSZQ8?g3~xs45F~@;qX0SzqHw z*1;)^jNRD2rGv~!Rhs{}*lW~2TsYP&HlWHR@_v_W9V?UNo(QSS7mT1KylwgSkX!id z3^k#ZqR#|cS*~wEUz@yM5(1+SI z^^+l68z2~E0);Ice^j`8TTG!x%(sJ`pIcF!+|(oS?;)5lrVec+Wi&Z_>(M_}c~g1~ zyooO2m{fn216v>V8VCUzo+jT*LI|K{zN1bdr}Okh+myaDYs9FKCC=}l$urnLvj!c) z%1I{MsESg>SwqHMjM@Vt`-H=6)Z;%kIe~!uMFD10$2OW?_FaO6#YO$ zKP5_K#`M;LlngUk-=GowAIVmSf)H5>uF<9|LE*RckQ(;0)3)s9{M8@9Cq2eu>hZgK z*+_L%<7KuqS2&>(ACo>g`L2-ZkUjakEP{b|HTFJvkXUsBvX-}t8%;j=SejejBtCw( zwe2ptF0}KYD9HiqnkcAM;1*As{6U^*yxiF#bj`wa{kA9tx7bAsBOl9SKtdcTxF3@y z;|g?Wt7R=LIZUR%`KFkO?^vucI|ep{MV|e$uOP?9XSp_ZQS*~GZ@&U#f%jVH%ZJ4i ze!byC&yeolYVWD!@E7w>(OjFazx$&JZ)3zgQk;MXo%QS3qQ7%xb$R!6o41rRon5`e z`op{J9VE9Lhs^aVSjzBpe}301oVA{MGLxSV{4->=DDI{ffyn`xp~bJ4H`6kJYR@%L zuE67|f!f<=l$TsZN0B$&RDP|s3>2s#*Vea&>atf4dZseCWg3M`Kb7cBh&*tcs7-vQ zsA&PU(>Z$P*ut&8wfh6|JEJ&3S!0{i-}q@?xu|Q_$1MjoW51OTTFF}w!Saa=Azen9py-X||C)(WBEgop`68nG(t? 
zpVZF=4GCIC7H$sp{T)HIgmX?fnPw%(%<&mrn$)5{vFN>L5~|7Red(Wp&5-k0@dX8y zQBR3B)ho)HZR&z7le2ET>%MKq-gRnqSOfj{Vg8dL-yIJuqu9StWj%?lp%%n;LH~EV zhC(ete!;Q`S%oKA^P4x?ZH1jKoxtg&m_bL8L&}5%%&h{C_UB`His0LDRa-DTlbUaB zdqjGP!_{BG$#JPM8zKJWb7p_q4KmS;9-9+DB0iwEKCfg;*gSouL_fqkC0b$5zO1#d zc0l}vJVudAEevoylXBBt|8%euzs6~oKOis-Hye7>zBuuALzRA3m65`6(Ipr{^Gi^4 zsN2+sJXXBU>o)$gwV*;a{O?cz<@NGKks-hDQCGDm1?QxPTaC?D)p5VpcV#^wrBhRL z#;?Epyw!NDdW$n4jkeaJVu4=h^ysiL|LBBvnAR*Y4>2xcB~SFE_Rm~XoqwSqL}4Rr zX1oVIEm#OWuPfDQEAwdqKmR(*l@KBPxB70;7k8hEt`qGgV<7_MqHHC00q&lEm#?bs z=jdZ<-<0^m%7eKs(8EACD8y}JTRPVHjdJp-&=7p^4FYhTwRSCOwVaDT#y`+qau(I{(QWAC9W`&M*Oiu~=E#_99* zx-;spXn}qko^!(%_dKfgPuSnyY4!0sT7=6l7+*W07il#leSW5Lzy{0nc-xkK3Y?8AO&1f% zMwX)*OM<>y*D*cuRX+J^v#I1WJWk+9DuP`qR1Ra$;L2tMhrfG^-?AdPK^HOr=~$Vm z6{s%#8;R}L=3&e<{daUEG&^KXG4ueG~ zPx+H~zqfymnFY4|{fEQwA9J_zoUvMKRBaup2kni@eAb$8zoLVXDTJ=XzDRv>D#ZVx zTbk&I_>hPRA^8@t*dP|Q*+t(X*E*w?`g3A606y{HJE#j-Q(n^<%BYGXyAnLZdRXhR zmdp8{ul4r%%&!lW-WRW}XQ+Y6{hU4=3hgIU`k^FEQ+`Gp?Yjbi`5lZxC-slTe3_4f zOhGT0|9a&5d>`drn~aZ<>tbcin-slw_biXN);St-nQRm;yBwkO3$VNZ2H5!eGXCJG z%7lYZAVTVJcc{@}}0ObZF z`O-oM7e*j1F;U6wZ#)Uh37_%K4eczyXd?x+gKjxFMJb>0J1*KiD|sw|-&5!Nci@3b zzx>uT)dC@bOOHtVNM{!~si*`Ya)3tmTG9`uy}-qH{(4^Rpug@{vhN(z~J7!yk3dpRW`z}MMMLznbL09?l!Q%R8bKH`^BHn(XZjeu3AM4I zqjx!a6{5Ukvk;CMMKQFhG9otXkLH6E;smw{4hi1T zp$aa#&Mt<>-px8OVM*IL+{$VchyzWAo9>+KgO+mpmhx@?Qe zfUna^GW=RrdHf#Iy|fj0FN;Pz3pX>Mb>Bm(!8b)nFf~F`&B8j+t{O2@$URV$tYBZS zwtdE341JJHN0?K!-MptCkBC>r7>*!51m|wo_f1+vg-7AE123k&+2an)kl%%^iRR;F ze?5KIcqU3Q&6xmv1TJFU{qXtV9-+IKW}hCh=eqJq@=1ILKbd`#Y%1G|c-L>=i^n&- zL9o*@PTrdC+Y$@B1jCCBV5OYn^xd5aVB>70h63XJ$o~>eQrM?b$Rq*8%>|o(54rE( z=zuToQ^54bO=8XF?rK-^L~$*e*2 zpRR(eF2pc6jIpH$9*!$@q1pRi?wEM~WPkXb5y{f{FZk{MyS|vYmJunvXyiUZ7Kz%0 zh1;w3Yl;1KZ=yDn8Y*+$&lhNL7p5a4JONkoo3A;UojBLW^(Y|sZq0csHw$Ast8Pg2 zwXVUG3jUX!ImXLVZUt4RNpA7)Wu zS;Gs2Dz8RxkUqnVg0oVxao&-ZR1w25!FqkMW-^d_pEiLa24oZ!Qje6K&Zszs3#bdi zTF%8NbLL5;#w^Ksyr68H2p?5@)1@o$moICbYXvI4v3^Sa%8=Q9Bv%~2!z+;olTz?^ z8rOmmJWFq{zMk^yytmElS`WRIF&YqBBpU3R%rDb?ll$A2*4P@XXKMZhl&prVG2wd< zdW!U-4y!1!ez(S)>?EZi1rN2i`7mA9c(X%(y5qhR!}0F|Xg-$C5YO&-wCH2)c_`G)jh$`+;_)Q+@~HtW79OHCT}$)LTAfudkISQ-}b$0zn(>5y$RgoKm$%5Iyjc(cqZMuo#szMuD?%v6Ei2klKvH9 z_#|$9XrtpygPw@rh&{oQCdq2S zW1?8>D`MrPG&rV#OX|HEVSS52ZN_VjxMz7t8gqCcf^NQ}Eu4-Y68;$E3)2rdPse-< zAT%l`M<;}yzbkXOplSa!mniP8RfUc@>BC;#MYqCvp7VnUtDMlo)nWX0_;0)l={4@vW{CuYaQ0%jPr>sOuH(N&rsb_;v zFCcj)nOXyLzmOth*4+6ex8MCrtuBJ=6r;|On!R$nTe@enU2bti&o_9{H$#y`T|4zz z7B-u;STxbB9#y@0b1DyB68|B3d5Mq78#7~;C~_&jP4M})r>CM*+Orp+ebBz;etlD# zie8njmNOiQ&5JD)PY;O=+koXyldU|A=H*{&om_AO^NHh^uXcwhMF(6)xe=Fyt<8HD48Q*l z%6d3yb6WrO8}7Bym?i??t7b?__2ICn7keY=!n(7G)I&c)fqP>1fY|V(!jWC>FoOXT z(G_luRzp*C#A+y*>HW%i_1{eI9fa(I{lh?VX3wV76?CWobACY?YY0U2J>k|My-Y_xQ1u{Eb*YM3Uvr(1e!u(OjICIuR~_}l z0NgTZU9!MpdltjH)C+n(>B}dv^UE{(%cslr{AxDE!dOlwsKR_cm?K(sSDLb%g`!gB z6NM4vhjHjD8k4;X^IcD!t)rA>O@e#mI_H1J!KNlAy4a<)9GoU1W&1ZmJE^1f#y|wH zY~8$I-Oi|k9k|+EN<^KlqSmi64B+TE?{y;?H6cgy@Kq>QWdoyu=m_WnEpJu4VQmV_ z308}a;q9Wqgb-)B>J|2+tah*!b5I^fkV8E+H0f)CU6ib+ z)>rSo%LIQCOI+~+Z2dVFen2%dh2>IB3G(%hGVbR$l0s&Ba@olkK8o@(mX!b@>j)+@gj7Q5v|= z1(u3v&@eOkz-#0Ej(~fmH837pLbiMBzfOH#)K_A;yM}-8{g|1PI|j5emT#9*pcs2< zeGPk9GDQrwJ%CRt{$PmSKA92G6*ooPBkT;+AE$AR_)XI-Uwrz4L@68Xns3y~5JpG# z{Hy*wR0o2@*Wk;GM+MKBK&(34ZnX(+R%qF?ig&;~;bbnY9Rslc0ZswNjIh5cWBu9w zo$!j;!-eq%b`DDG=vP5arowH=f3o)k5t=y~9kPOWH~**4FuO8XE12ps zuwKE+QyT7mzOU@t2p*f*2}CeuxI%DxyrY$AgMdq2x)a2oLD;~lfG?pKs0X4D@I1Rf z5hsR5T%a*~8?uYoB{0v9Ln*Sk%nP}*sf#}PH~ z;tx!`$3%Xmg0m*~Qow(LsQ*JP0Lw_&CxVCW{Cg-Fc*4A=S?!i*oY{+z#HHx3B8(O! 
zDt4a;9K@Yzqn5u_m}hDZpTa3^|ODRy9J2zVHrhSZM z`09<3yonXEJiG0$Q3{x<_C}BMG;dZt`x^6dIEmD3G^hcU8L=omR4C(FIXS)`xc1H^ zDuBI$lPvN2+wI_E?08S!X!GAyqa#b)zkcT8Lko6pvd_4(l6jkHU&%)84f6W09#ay- zZ48HsvYvmSHE6lNZUHebjR?UQW6w}dq|c*AU72pz6zuCuv=hR6Yyw#7mei@>L=X(W z%#+ui`C=ys*Z7tC){IuG6mt7p<7}4=Y@_3oVm~_Sa}-WM;P}49i?ECUuZPJ~OXG&y zo-RLC1b)uvG4rE__Nvy7r3U8eUD0<^Q0kb!CHfNid*R&ZeB^HpzU-XT3qhfa1dO7i z|Ca}U=dFiw2AWGV)ReuZG=+H&^H|7{B!7)mR3$bY04sXqmN@CVDF0EPK&EymI1rD( zh&D~IiWWf^TD&fAO(yj8NH@|#bjeo4@E?TR|j?dD)E z3ho`s2|{Xb1(nh3GPyb6i`Lhu3U138Iqw5OMiu*%CPn%OOc*abX&oRaU9cbZHLK2A zK?WLqM(>9zb8Y)c`yC+K$CilWFTN5#(VVx8Fpo{RrKfjNgU0QOZB#_}@=r!ZpISiRnlW!h z$fm)4FOkG0Gv$xL+2#c@+7I-3=p~Oph_z(3LNeX;pF&&z~32)^tXH z?8(NU@dU1C6~V7tgyB1 zG^t%;v>)qkbC+{o{{0eG$6+6#Dy4p@7j8P`V!0Br157@8-VBPz^`W&*K#{u5xZk-h zN6jD6&wdJroA@9r*6c40g(zi@>51bjIlVDS3&I%tiv9Naal=!v_x|n%2A*fEK7M%; zs?gxQW{YX_dSmUGH!Ji(eq+*tH(5ejyr@9x`usQ5D}p zzQL+b{UoJMN~RI8p*LoG{HASMp4Lzge>0nwmF9_5xhz^@=&Bs1{h%C-+cd{b?*m9WYYIkn1pr8*{);c0BAkcJEC@XUjhQM)W>-_QsTzGMEa$5OvsAHg`F&TO&_eMb zgRO5fpAkBj@=z{?;~g#+rX`tRk#Z1=QwjuzZ1WiKzW4%JL$?o5N)%ODeQx~@Ns!+X zMHe%jY3eNbjYQ+X7?3|9Rh5N>ud6Cz)Nc_u*aKQpWJ0UvnLe}dTQdV&{~ijKWbRO< zch_hS=~$OJO|^mZflHDJOO|uWdGO3Nt;-nGTB;ZjOCV{4I{>*t_$7#$<^4JJ`x}ef zi_z6(g;J^FS22c7s!;~!X@Wk}?Bn|)-%DFLM`A=a3#jt!Q(oi)4|&?B3W`J`(EWaQ zMXi{C(Ws_q9N_^|U}2h$9B@S~W|{AUCgW%_h{zDnr@OTA@}lbER_C}>{ylWXGAYqu z*O52$4C)9un#C0lvZ`d|_aNXkR+>YYVkmf7w15|oB%H>pl%e%x=2A9@{vwoJc_uYu zmF*?pTU^gtK^S3+IQo&t7@>+egF5FE3Rf@`MRo=_2%U$Y}X9NM;{xI=) zA=84GF*t3A_L7AR@0$>B$d%Cw{KK2z?>Dy~{uk^$v;(RPb_KpVa#3U>&S2eI zy1#26&w>wl@N832BbGf+p7&QNjIva&woG%0+?|aXhqhTK;ND&C->K`nA&Rc=`r*@) z?=>Qq4uTr~#14}-*(%U8>6E+X$?U6B&|uso6zE;`5t?%3yvpm^B^$#2c>IH!^OJy;8s5PL*O2h07-~u1A($qUps6`-W ziSH~rJe!k;o75L0?GU+jz4(9<)Z+!ZvbYP+j~^4q9rR5SQ!U=)dAb;xztG367EkJm z4UCO9YG76g!S@M~@PZSq!(Y7{m-fA(!gy!P+QjA{YTpF4;7t`@h zf3X8d5b8Vg{f5ggH1y1)Qj}!m(kr5GIoE@<-@w>FZ_8m6`R<8upr1T?hmI&n>57?v z7RJa~K~_1x6Mw!aZKl27>7si|f{}{`PNf@H0^sRenxF>&oS50qH<~E*sBtX|)M74E z9|ymG3VJ=;YYCQo6XASIv7dF^SexhI)$-d5#{WBtM1a#M6tWZ#q^kY^Uk)E!`+o{p zpD)2SP4|({RvJMNukP}`NUEQ)c1YgFTQ!b4me!o+U4xqIlQus~Nxcn@33^TroBW6A zrq8}{4FQG4)#VMlxdISMu+_Xp;sC3QeT0it@=)G9pf;=Qot?DSAvsTuid5HBD(J{L zgsWz{nxEFO>p2TkgS=Mc8dskOWx#7o0~*M9LSHP$LUkfc(A(2O&Ne-54$Y!Gch2`y5NGr1+T$@fYpp#HO5ALb2XvDiz2mrIj74>p{i zy;g(rTRr_)m~ZOjQJ(c$1@RRhC;d6|6L+w>ttlwvm#iL~ z9la#&`xK$Nh`u59^sz=cciW&=YhzFCAX5By<%48Gn5Wlbe0RTWk!SfJhmF-Et9rv5 zRKb8W*(&b9!fc1n6FS*XCjg8No9%neJem(Do_{8|74Z*>*M5wZ1_yU8JyKQfpUS-L zajN~F*ZfuP>b!a{&7N62>9wz>o*c&Nn|;s7yynwZD9W?>yCz#)^6K{~lYXVE*;jK0 zKi4yl+G^g{ke>e>pOEqOakc?`EVmQaxo>W<@HJn4cF@pDVed`-#f(vjw}HXrE~62d zU5*Xa-i#@}YNs^P(luFZzlr6mIYd(Oe0My@Ei`{;J$fL0aTj*RW)kj^9sq_`{eHm> zSEk7J)~+w=Hb!kGr@+{5{n0X7)p`6%*K#|fdU)=$2SeQN;Tq=R0N22?mTD7TOQgPT`uoqR_w&<$nQ|cz2l04X7 zM|YW7LkX;;3E5n{!nhk_8Dr?7YgnqkT>5qV!}sLf%hg8ZS?}S&4VSsp3h%vHx)&2i zC&FjL2RBGYM_gar;Z3TcE7N zwlNm{k1t(Jc{C{Z_zKY&;_O>FY10|LYrsm1nkzT*2-?1nwmJzpa!`)%eKbDuS|$IpQaKaM_$XzC@K7w_99JiBv^S|OH^ zoQf0MaOUq&7l+sQ6xqY8DKl&HqKU=@yvtp#5<|0f4@2@p`faEAYG);XX7X=Oncp%@ z&K{gFT=>RY9X=)De#}H&CFmf6CO(XH7;xzqOR`q}Ux8qi>BRhyWeW|dK{*pE+{CHv z^!`*E<<_QpJ&j)jj@d(gk(~LBbDyC*yNw`aw*%>ZG8?y_FG9IbD+4 zQoKX{N^Y}+W}SJ4E3m&UST>53w>Pg%A7Yx-Ra810EzFm=r^&ykv=^QolxS>4k1_K4 ztd=Ud~5Jc?7Bh&N#cS)yF^l;j4 zD}5c-*pU5UGYoASesF59c-PIuH~X5~h0KlTJ=#{StIS$9K6mx_de^)X>s$SKSBkSn z$K|@z{zuVf;g?w_M0a`Crfw8h2nJM0FCBf5Kuu(FOSil@m|ib%%|6Mm?~-SCSIk6C zK?IUumBgW!xuptt)tX$7MJ1FT=ay-*9kQ4GfbLue&J1 zyx==Kbm@A_ia>~KI^CXsX?T}AW6oFj-Wkiqt#$ayw)HmpaI9ubYyF)j-Q%eS zW;^B=r?FU6jcBF0eLt)oo}{?FodYl;Wm7c0U??6g*3-L}G6VMCT{$C*N> zHCQrmWS(2V}{0+TaAsa>(4L6FSD?bpq9OACG 
zkl5KC-t7$?hAc?OrtK~*(yje&^>N-RfMLM#*Ae@k=H09>FW)1QKLo<-D+AL1Bdy<~ z$Ue%LLObM`Jx}sTUw1hOvWGp56FG2YWr8%hY@{5h{Vf>xc-lDmvswUamJWM*AO8UG zRBQ>gDB!PCEbT!hLz3_UzUC$S9q`$%uDr)(72$taYb!?QZ#le(A#YkdRI|lZ*xm_m zYhH%>emA${QnumzxgE-=z>9&nk2vOrd`F~L zTCFSdYHz7$yndRjoL2v`Hnl^Hu)6j;hAz<_wWXm-*&e)I3$WHh&`cpcLT2SR7k~m4 zXfKs(ac}S{DL;@J{Y>-h9tH!*9TjT5ubVbANXgFhgz#<$b63X$b-Fp?G+v~f>JY;+ z4`~44t%wc<{AuEP1ftNom7S2e@6UGkMp6M9M`QNSG$hiXlx3zQ=F|?>u_LCtx;Q%U z$sc=JCc~00+JfO@|3%0A2(n8hVDg_4uUoTnL9m7xV!PUD<%t{vlka$~OXSVxgtSB| zB<-S5+ksaEN3#s^Jb}5pT~Cm!@FRQxDSvO`p^9lRmQP^-UN|7$Y)G!0zg+0uR^etP z^g=9htGg(r8B_X4mNKri@#Gd{g+!89FgVEn@=;vvl(`rZDy$jkdmC%2P;WCodDtut zQXGic9m%J2-KJPZF&*GJbo<`9gS8D^z79Itn>KOmgzXa5QLgw-4Cd)|Epo_72 zUmjymsH5zlq+WozITBk0aV_#3V;u-rAk=X~ljLu%1CkuPwFHe4I4@q8l+J32MfJfZ z0g$(ou4knnwLpk)C|w0HYiHW162EHwmKopW>%lHE+IJB~{Dzgsyxa+OnML5D2>zL? zT9!$&AOCc-f4bb}4?hN$m($hR61$}xlA+72?*flJaIsRZB9m6A`?W2> z0756i-R854qSClVq#h0!v@K$>UoB&smvtopb*lNK;6>@Gr6u01A30|<$p)HoMK1v( z*_Kas_PPr;z%^$HcU@R{K>%|G7t~Flv7v@^n${~hwfW_DRPO0u3yVhsU!dGs%L#za zDpGs+*)X-^`mdg9}wvAZc3Ep1f{TCQcg@a5J}5j10QE7XzY)u z4z6!OKn(Kfion8o>O7kiea&|X)h%R$NZT^w|OxHU^o?ql+@`2MEojOej8d)e@^>1n0HMifd`Nvu~pfSj#vU z`E=-cgN=%-c{*#lU5Z7x28G(DtusS4A-h$RR@@#9^IgP)iFm9{j+SRBz z^9lMkgtWajSGArukM*VbP&SsbXF0I;t6dfq!3^o+m60b)x31aT_7;9em2JBJ3Kj7E z{mWZ12S;&WyVF%OXOwyiT*>3}ZAoq~cuaQQE{mU z8*cF?-b)1O55r>FPix|AiKXRt1uXgAEf#p{j4zhknlqDKH|lZHs=6s?6B8zdoP#5G zHL-0c>jUNgp@pJ#y*ZR$CHKp*0H(AHkw%JBaWrKq} z*eGZoo|@xknOK5t8F@BnW$5e=&u!)@KYP+zAd5TR3x45oW`TkEz1N&E;_a{AA%rd} zZEim1*2{cyB>tInn(s(fyk)SRY9TQA&f}NlUdG1$QVF70(<4$xGV=1xKARV1Zt#!{ zRM_J4!EWhYyjzxrYc1cuOm2;V-5=C-OLraE?%}Jsa zByqUK+xD)<+T;ws<_H*055JCWxMGrZ{l@xZv!s)kyfd4>8=V&`a=!EC><GZCG3V z?;T1FIGR6tek+Qp(hx{ztM=abV(>-R;IrA>JVL-u8~-%k2L(1*4r@4H{^;xM@T>$0 zqI}cDep!gaJ<+cP(4KGE``?wm1$q#wk`C3c|n}G z^;e_7!HD+9+UySkj>5A4*0m_|HKn-Ou3|ags|!2*HF&HLNXaV)PM$5O-5zN#kgxFTwf(gSw;cAw zKWXY3Yn+@~p2U!KJ*)_`M=6kq5q(kRiE%)QGu$l3wzzgSH5yFvSc%fJrk4U7oQA>ldTwzg?`>gtB zJ=KpGqZ~b49%Lq;?dhEW-`|4HdocVD$!cG}*=yVZXrsnXISSCZ&zY)%__u#o;EhF_dseLNe92iUmC|18v*Qmgn*CYBK1l6x5Njh}M3M54UHUZ)`-4@c7?fFi(snkMawFnxb54v0=u*VHOt+0ZTJYyTs!t7mgp0ObUozg=6mk|IxlPV_L4#PoBPAq)fC515<_f3E#Kn zHa4?fugWuA?O<=WW)k;q7!ta1zZ}?GBDb6Ldw=K6ZK<#OHrvDDvjGSUKd>!{jER2O z;BLM4AD+LzHWcE_m*4`~LWgEPoJL9b@WJ3ge)NP0EiP>T(P7OVke!>ECb=)$f=w$y6~ ze{NWwV4(ye^CB9x9i~?7a(~T0W)0~8jEfR8%Bb>1;#4_8@RGhHOA-4F&vQ4s{Rolf z-40>-;Bpa|u;+9isEy{aZ7NG)1X64Z#)h)eMIYEF=P6g#8ht|aD9cfPkkjpq8X1{- zeUz|2Uo04wfWiZEd6{wb0p{(F@LOhkDEWG_q_EsY`!joMBg-9^_~@aWg{UWqdzc0m z5$v7XHLkzLTD^`;g`|a`w)?>Uod_w72tz%Y4o^&zD5F#6FJ9U*ud0aS;#dVnw>U$& zZGBl_rehG~jCN7my+#Mi_L7D4X+sJ+PlI^iQL8UjifaE1bBSTIMy%2V(IThv!Pkk- znw9E1XE~OgZhD)QCP6_bva+*RsxZ#c*Tm4v>v$*cc9Fy-eK5RYc8;;K|6Kb1hVU0t z5KbmLKDA=O2Wgg8~v zm~NonKv1Q7Lu`1n5xISVfa8{hEN2ncq-DXr}0Kj%E?2N`-ui~A^QFCLfEZB^#ffVC^;_cgQ^@YGAY$I*g4GVRlca1Q8YnM4yJ;~;{xlF>(w&&{uIlf2~bVgsZ`K?Dp% zK!iX-wj!d`4G0ksqGBjgB28K(DkV}w4}>0iPk|6f@t^E-b)NG*&$;+7auY~dD|5{? 
z-tmrij0)HPeBU^|S{ORDF=AaLQ~dc;@v%NDVA2Rt)Zw>D>Mt4@<%Kb2qXMXZ*@J-jZ{bmhBZ->dMo-GOEvKs4 z`>00$jWN~XIv6GO-cdMir$rgO=idUCnv~dPmslNaA~9?m0dfnml3{bn~s|Oyj0QDFjRK^YgO6T>HYQxU09q*O$F-{iZZ=1j1(94cHf zme2`$X9*qXuz_P!az9o^z-4&`uav=lre1*g?BJMhmlWP;-u(2$3Cp)jQU#CyYzWn5 zPQ7s^J?k)}LOdIvOCsfXYjp+M^%9Gc{%i;|93!n^IOK7@Yei43sWTz6cjPq}D%cms zI6e|OU}v26xZ^ePed-}y;~pOz^qF|7u&Dc7|I#_!^Gn%#-%e~W$oLW733Bu@uvant zjywD!d46i{+13n8UD>T-@B-{b&MzXtM7<%fack%!;*q;6P#ogA%{0mOB{wMPT-ieC zMZ+kBh9r*s<*Da0nE5xSkYn~%&YWJH;{S@$*5vH~3iv3kc|8bH< zG_*G~h#WwySR+cz?aZ0wayzcequ&pty#sY#fYXj{t$YE9;oK3_jkwq{fJiVbNTv^q}j&$doJCCFSGmATAmq)ByQCpKhxxBfzeTYK%S0Pfc zt2$Kw+WTnyZl*nSMTXe;hW^*DPMKTPcNH4QpdmimE8*c)7;2>E?BX)!uK_yk=M{k z`~Zo#?y>15MQj)B?0(?qBqFxIzaZ0%k-sGPAuT?04whh=OLb<=eymO1@X>NSlu z#H;-t`X}qkETMi1F=wr>L&5ogudWQB10u(@DJWmrdttGt>~QC|_!Y?O)f7_sTTJ6| z!1R?((Fxr&k@EAlljufKkLa`dc!6o0)+fLMz9?_lJBx6OYQZg@LHRjsx-#%!?|DBF zGN$9rLNf+Nw<*B@v4Y_7!21MbX@&i~@2Y1Mn_}f3Xf~b2`&esAW~L7jTF#8vVQ_0? zHnlmS65oG>d!w7=#@J!#GOoFOoUg8j4VbD2xD3B{=5R7@R5KJS=?*&Z{H+Mui`N9g=fd@VeJs}BWatS`43g$c(?>Pf8%Ics3BWyM z+E9~^6}5zF^(bD9YM{$0a+*%1xP$@CiWU6T$XEOieE9GY{H=DhSN5EZU+D6JoyBi4 zysGFDF`+?YS^DPX{$|s%{DR@>k()_}+9$b@!{tVKlnh-a+DVGpkQR{d*Hqn{94MG%3Z z=tuXtp-V>AZ4h44;DE`bFazM*T@%$3e> zb^_fP%B@?IzL+#HfBP8wX!J`NO7&a;$GD1DrWGdh0kem=;lQe&G69;VP)C1}94l!m z7nN;+%^JMAwokOy6xG58`(_51+pALqH++VP=)<%--8&ihXy@UFVf?B*W*&Enmz&A^ zs7Jaisc%U`+waQCfM}s6a-c^F>P??&;Fypk(wvCvqxp-8d6Jobkpv0)xfW?>01 z)f7-xQ9=03*|Kgyf$wIKmVj8bngMl~&w>Coin&6s<-(Msnv#Ist26}scMd%O5scs}TgpTzV1L3w2(xUDWSOVS$Vn9LCYEZ( zHxTWiq_)9%>T5sOONELpu@{NhPZU`}$iERP+2JwQdLpU=fciH%5m#!r3e64%1{`%O z|4aXle`m;RvcY%N7g#GwlD#CPb~_GfJk2=UQCNzTd~CRS(|_m2l=#JE$TxJ6J$8@r z(MO1#A|)P{k^B#wj@B)p&-toHFSAu_cVP~y-Ck%Y`*6G8I+}l0{WATw?-KT6>E8G- zt5Qc7OO|Au2XSW2+=#GR8Yoku3R4Q$op1E&Xiq9%dphnrm3P5lv<|z2Jo;X?=0Z`7 zW|ENTT}#uLuqw= zr)xqY1qQE~=e|^~x+55CWF~~C3z<(k+0al!#Ae&O?+FR~e8>>z8ZEZ-$J$EaA;Zgi zY6`=A<5i+Bz@!dRb)2T_f|n|DO+5lU_M7gYPY4^g-Q!W}f}dsv9O^j!I^DAWrqs!h zaaN&&!mdxsA6GexeH_-os=kP<{KZ0Ok#|6ndB}8eJQnqbgPfA_SutZ_UZ+Twmsm@e zt=l7UmqqSRN?N<#9=}#W_wA#-tj%vG!X)FjC6R1Yr0x7L2K>YH4w9lCY5yZy!u4O@ z)p5{p{Zfzn53{W;L8>=OLE+Ba)q-oP-l-z-{@-GXB5lDXo@;>XI6`cYKBpj0J9d7l zeAlj@e3atS7v;n2D8ns`UkQunzF0R*tvI~;W7?T9RFxhP!_qit9B{Dfab|bG3y{$HKaL);Ia6J zw;J?s-13$gD4Mnltc4sHbVS+6{c1XxlK6BydTqwHw@R63&F#JRa7f~M-vdj^aLo{C zAwTqaK5WFah>I3UJqnJ=%Hj<4%{lGqA?$hDb&b%%g0emgP(B#1PBT1F5b_b z>9SXN5HxX6Mc<9H6=M>LNgprbnGLqVC{^bhXD$&`#J2@>HD~hU5=man4(5v6WUo zR?xmSoycA%%UyX}GFInY-`T$%RlE8zv8Yg0<-7BvG7H;PV@uv`W@dx21P|Qc)uCED z3{5;chreG7*wVc;PA9)lKMrF&KX7;iKXrUL&{Otiv|^oHq#&t+rAyP;G`K^=*tsBTIu(=X@P1NeIHlmUxrGA9>X4l3cmgNzHasx|b z`R$pY;|IOvxD~-o9XZ1O0;|>X4>ar#ACmUxVZlMZ*C)SHYn(xrGV^A&NP1ly6S%uX zYVhx|i}ylCvEkUu&;tTZW;3^4m{F)v+G;txs-tQ2;;dYNx-Y;&;h|z^U!Effc!tW9 zjYKFMk8tgc4|Q$`a%W$PUyS5y?X6QUytA(qcX^Uz(|qba%eSF?sZmyRU{2F5xAC02 zmE=K}55L9qh+a&vN%wwHzM>jTOA)oux8w63*_G0x#TUR__nGL}y|3zn#a})dAHz70 zo9kxyU(|o)ieOxSj3orvP;Msc2VqO|MI82^!YN1n4C13a&8Qdx3^<(70J9}HTvJD; zgsc98hQwz?h)(pZ>t$Z&T(OP4x@C$CIMLDG3d6LKI8$$Jz*v$+lk?dsU2-_$M4RAt zXst!c#t5K+=?XXgrGdwm1#-E>Ru{fckd4LmCmiGb{p7dM9*`q!dW$%yE&v8fVM$Ni z0U-5;N?2%6@w_{|Hz^0fSW#>N;!J(7?PZ_!%=Nbn`CNQPhoeXFAlAb?9|iv1kVG#> z=J!XvT3|efDFHsI8M3rEuWR19QY-h;{8wiXxJy|94YsS#&?5!uvi4EFP5)@O3$9t% z6M83W@}|MyR5U>=I|WSov0mmf8g&-6+!s}W^_mQQKnNUSJqr*LqAqM{`AY%JIF1N7 zMmL=DO$t{Y5cLat2LPF5C;<7n>Gt}q?>P&FWbEV+D&Bzt+Pv$tc4Wl>_|_qGP?!zV zWWs+vRWhBsZb%%W05-_a6!C!>d!o$kLbu)1b)`(HRtrSsgrc-FYwFv$`SM|2GXW&&paco6ASCRz&QlVZ78s3|i!Nfhn{h&<&AA;1-K#FMPag7jXz z+;N`~Avnb4rMZY#0-v!85;SwRcz!Q6P`-eK1GnxeQ{|^D3gDzxsLhEYkxc>qNAn-u zEPCI3nT{(px*b^M&3_<~| 
z*CA3FygR>d4l(OwgOg2@HbwG}y6Y22{^sKnamRsSn|J6BK3Q9YYE%h26l$Q=oWq6$ zd|PqbTA8&J&spsuCaEUw6;6QQmn=tf6eLBbV6x7iA}U`4)Z_?37+>lSerBZ?8EzZo zArlc%lzJ10`NQ51*{+{ARwq4Q8iouv`fuupH8x`!1tr8whkES;6tLeb8cHHfsb}t5 zXe3xnh?ZVIIrK46c&g<}!neGOJl#SbsBId1(RyzA^hU_CH+GUh3K&5S?_(;y5YU6UzcvBDFj#gOPr&4+U@z-L7;2(UsS4!MU{V1wx#+ZqnM@ds+j@e)FfyQK4AVrx zjjA|B0Vg3(J~gK!I=2=1#YE4@b|P4+7XSLNWb8&#!8A_J@3+`zz+n>dXK1M}{A)kk z4q~cc=tgfeaW!e)ZdCxCFRd(8FaT_$7a0)VPYTWJ{4E0XF?km3X*Q|Z9>4AZzSN@K zoilx}FAtlE$P*6hc1V5XN^QkUiQS^yymeGw^w#w5R^_A6) zaz4X>2JjcM8-~__2-k=QY!vW*-ju4-8my~aMG;#&XTOHJetYrQ9I`1XPvn`~rkTZJ zoO|W*RqV+);Ll96A8U2HySMRNs$qu71kjEP9fRJIjI3T;9c>G^g?xzMA*AttPjE`f zngU=p7l{}~1t1%olsZ-yFE8S_ez2`z-B5ucbC+3dRirz~IMJ@3!p+qp2W-wGfX2)n z@3Q&kJnTY9Zf2|jcJ=EBr|r$G83ExwG?Js%YBnh!bXl{tn~uP3meTW7-g!0>7>g7E zGxh?lu>@TX+GNrcB8;kvK@Ys28J13l7B!!H420|PsAAAIVESame7XxbRM(onnmte& zf}p6i=g^I3kD&9XI=|E6%A$2ZQfY6RcRmI_y8)(Kij4mA?{v%m?H>34&$Z?hf)^(l zec605c>Kvt6zxQ^>_sw^1(VT%w6qe<<%q>k?LOsl#c8Hz>hb>K|BNuzH};YB$fIFc zXddfJMpJ7!V z_q@Mh)CB8nc{cmcSwy3(-~eRo4!Kx&agh}@s+F?Keqd%;vs(1)>)o%V*0Nh2ZIihC zbIj;Cvvp&w<{@u0rqgQbH{ulec58jp>BvOGPi2S#cDW|`fk*m`~!e(j- zk3Gg-XOcX;a1Ye$uY0sBkgUKY#@reixzk@U<)m}{n-$f{jkSW_9-L+J(y4ismjlPf ze8rCxasKe_tqIg3EM%)&hH{+j-nz!B=cfg0?>A#>LV9`Ee!SUI-5$9D*^b=9kbf$# z@owQCRII`Dx5qX%+hzw3c5va!VG6>-g+}y8%nwB2uAr6*vkb(XKA6{Z764BBf!(^4GF`cdtdeTwlB2 zaBw<@7?Yg{h@h0lM!#$tyoGN_-1Dr(|IwY(WqN~2^u_tU4Ywckuf#0Pn(RIGEtgN) zT~j%h=&|)L(@SR@m!nCJV`MprEj|PXySFZv3uj&fT=z0z*CeA)lx zvr(Wmjb%Kg=h9rXBz<(nQO78X3-77f8$@Ib3VD*U zM=tm608?*O8tiV0qCaQO1X)rYQ&H2E-M?hGV?l9>s0aea$d?6Yh{uIWH#67ldPGAW z9|zYP_vfBnIvNHn1LMQO+9N5fh=_avj7h8Q8mRXPDBk|H;#tvD=JsDdR@G7RfS+Zb z4qA{oLGW~vO@Td1oQaLTQndG{y7a9_Yfr1z-$e-aXO0d{*N(j0pvLMzWk!yiimJR{ zUYc-wa$tetGX^~m?>;PO0M%+%ODo?2Np#DjT-dAC@1hvwa>4l1L-Lh~*P*I{Q?dNo zjKe+B=Nvtnoook>wP;3r-yTp>OhQNT%|yS}fal+W4)LJQKXEoiZ%b{QEk=;hvEmLZ z2Z_BE2Cp99jkZwl%1J9ruUX2H^Nl3DWe%S1P8xi=L?ZI=hM~MvPs1uD1_%lPeK3=9HJb3Q$MQKtSGZh1U%0gWZZNY%*%E{mB| z#zpU?jw9f=_g=qdY114+|4zrtf*@=M2(LzB|S-p5G+|6i1lT4qf1) zr8li0`)dUV7s^7<^K|R}DJVX8T>ZxnW!gfZ;PCiktpLTi+oy83p*7b`FN0xgG_7+I zi8~iZWxJKwQ6;A0dV%3*O$+DJzpimo*wIPEfjK+b#n#bzgB>+6W9fbpxf%qog0zkb zD;_|O;%C#e`2jq$LuerzeWHYtbe_8YYOaT4X!nsFft`w*az&)|&xD4UL;8J`Q|;B1 zGRD`yz>`%BuwL{;xQe=8LRaD-t%*&-$&2s%oJ<(vslUZ;#hNBS){&YvF|v=JEf12%0jJ=7_;%bQk2X+4>Cb~e~J%*zn!5=Tu0y3kFXNff`2JkIzGW4w-izu^;kpRJm>mg-2|AfJV9 zcpzMUCaHDQne71uLZkd0x0V9uwdm4s+d&3J2=tT?0!%vA$t2FHpN6LQbp7SoL{ObDj|gQe4(e-0Bet`Zxk(LvdUPvH`WX>@18 zp^i7o-0qpj7VMJuOwSubPIeXnS-(UMH3(pU)zrtrMFl{%uP6IjO#YY*<WSSFHXN&Qtkp79hF)D9p;lcoi*x*EP{F2D)I&;GQ#LmBkIqM z7F{k2Xi=Q(s~csPT|nq|@6#fdO|9K-T(Bp-p5&qI z0<2Kz^O0q&z-UOpFy{`faIZ2>WQeA_zQpMW{(0ndl7IN3s8G$YZyYpmX49WMtvSma z#`!3pnc)H^YcjvcWXed)h#rI>jAKzvBVk~`Rj?!%ARe+`4%MP1t1bIAw8n@^R{reC)H&7TFXJh6SY7cLyvTgFeU1|=v({$t4_hEfZCtm@kHzO>ER{p~4PMrE2 z`J9K@E*+)N=~Y!wr_&U2{;TB%8!tF8%{|B~%kIoQFzzS-BmSqbM zKqC;m^W@)R6Y9|a=b3eC4g*B`WR&;1W6!X{rMXza1q!zmba5h>^S{Nm(*QFOZu;Mz z>-NR|s+~Uvearh9$%l&MT=;g_j>|j%^}~?A#YC4f|I1Tz7swO$S5p;RP^N1A8R-32xFS4hXf@s9Hqvix!|0TC+ zOiLq6L?a6aJH9;e;jtIKEB++kE}iN=0)JNKK4zY|EA36WUq}AiBhQn?lG+|$6PFJU zv31n%&TW!b^74*@9#QvCedE}e#{`!5l|r1Rp(5Wd=rWhkaqqOEe2^($U-;AY0jX<( zewY7N%L_LvRjx=5V3iAGD59OY;}D)s6cv>wkYH|S|?z32$b!7d{{Cn!V2p!Xkj zcYVn?%Vk=w-<$dFd9>7QrxQp-)V%(1`;9E&;$i+OLdUi; z@s25#4vhOV99@pOWec9?`Pbr^kH9D{@){`tiH}j-cHKLBi{Zt*eLFX1T_SnAO{Jzz zquaA4IwmL*wwOeA#Ldd{yAO@BGn4E+RZIT-mBsthrZlnM`YTOh|4-Fhv#O>yP>Nl2 z^4%IW?2l}GIXo%~J{A`_BJM@Fa8C70M ze%H_taFzaeDUq1s=5lJ89ej_NL)3%QW$QOA%OnY7ddAh*)*OdP81xNY`+BfnwqWKBrR>~=xv+Z z~b%XfU1@5@H5;*-6jg*VW7=9b%R$wJ`33+a5@eKLScf= z4q{WjcA 
zmo%(vLoQN+>v;kEhXP4HCh7-6`{BK2Lv5vF#zUv(@R5qlM}Y!q2ty8*4FSmX*-hI| z&tCX2Fm9^y6`jZ!EKj>ywCmBDg2|P+uDuhMZF$x_q?|ajnF}`h9yDC!-Pb~omiB}XIQAQJCecJSaUUbfg;sIU%}KT*P?RR55D$?rs)jr_Km6Zd~iAd%gVIE`0dajV%?$7ux?+sUfvg! zZ6}7oGH$lQKVW}+8e!TXB#%X073S1P*k6I2IEl6|_#1X`TBIUJ`-9nV?(x|VFVmt4 zJbbK! zqcs9gZr7$1q6#43pqbv_dJh?GOFWT224IBhs7WU^%vUm#Vf65Bs# za>D*sOtpoZviWnO;DVylsikE_@c9J@=3U6^&&f4sgancJ;6TCm_9tR1<~QD3Kd#V_;yw-LbdpV5{WR!FX3{yg-ee^akRC|v z52#v0cczW3T8D+|pun9;Z18&cvwI*rLMtQ0+O^_UeX_Wr9LNsEAh~?eo*le*_`MN} z5(mp3gedZ^2pV_W|A`azFD@37((3R`59(sGCQ0E0(}&{og6niVa^+l}aY1fbDKmrc z)`MRjTk9K#0%d;M{(K$2QqJM9I*|#UvR0S0|7VQL#j@4kpb6n%h{+X z_op(t$iJ&==H2AnRt(GcSl$Tt=<>z8bzM60&PH=7G#n`L783py^6_qh;y{nGQ&=HT z(fJj3k7tz~%7*iI&Gqn!h!+T^KTUu)1wZ#f((K5_lb*ck3xu*H1ME4h_F>M%Cn{Al zc>XTmwJ>cO6&R>k9ko`2SenoJlI+8~3TmeSi|u;Ddgx!|=}((-1p=VC0LcFtvvY)h zi#7#i&3g@#&*~*kof?@r@7IU*4DxH^2o*)aAULh6jinIfmOtxTO9aDpY?CPvj@Hbc zZ`X1ZQIQ-)-o|SF$dTV-lx$(~ztr3nd9$avht9RHV`KDxy5m!GOvuKMP<-lsh03C%weiU4GGtr?6^upQ>Z{6@VeTRnZRKW#vG?S%!9OAe?|s zmr+I0g=V!P!==1g_p8jJsLxqTNiogDy{#b7BH6t|3G@!UZqEjC%-xjrvCGPdAbgG7 zYi%744h@_FhdlGKHNi{G*{ko4FU3EuvVK_q!tPBKnn|Alj=uP{gl3hqG~0|`Xwi@b zP;$sR3KT1O*qn8!s=yK(5x4H<FIXMp;xu%T=j+WR>Is( zDt~0i1cp4&*(e7Mje=J~gnl`oTwa%?V`+9)2{+rP{E#m8SIIQektu=Cq}ZVb>+8 zw))h;Zt{Eq>dlA4L`SL`=hUOXeX~!4+mci3%H!#cZv4gv2{oWN3B#bbX@Q?;TPP30 zFuHWNW9b;}Q!E|S9e})$H(kT8@2in#Mx7~5)WG(b4%cjeTgt9M2SuJb<@*gcf6+6B qRz7xSTTbs2N2|zi0)aLaGlTYBg%UOZ3Gi$6k4+NbfBBI5-TyyBS!l=r literal 0 HcmV?d00001 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/original.JPEG b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/original.JPEG new file mode 100644 index 0000000000000000000000000000000000000000..fa23a029ce8fddc3efa4405d354a612981d8308b GIT binary patch literal 9414 zcmbVxXH-)`*KR0+g(6i{Bq~h^MFpf25djea=~9CR2@r}j2@nz$0qN2eq)RW6-a)$5 zC@pjdy-Gq25R!}UyViIA+#mPeeb(%C&N}y2oMT}4Ph-4ro`LZKBLlFCcfFrLAqP=?gP+ zsO@Vzn7xCeho{$DZ-kF;z{kL#;7=i;F|l#+35j36CS_)Q&(6us%P%M^uc)l5uBokS zZENr7?E2N+GdMInGKw1;pTN&AEG{jttgfvScXs#o4-QF3$NzZI0O&*Wjd9j`G zqW!l9jQ@Di(0ZRWIyU-qx9%~ptLihpbmO>v|NVK+N6{IjEf@Hu3@G05ITqQlDtVO>k9&Cl6EZt{tm93zXZ( z^aD9a`6xNm6iS)oM`Z1sk>+k5yS9%Fo^6{Ev5+pU8QKfke!0sZbK&uiSSS4~J&~xd zHBZjB5Ss%aG+;;82`C;MiV{Ogkpl9j)$ocICn=gbF|(G@N!Jzm{iIGyu4{?iJngA9 zs;Lc`{#;PV{czUw!pl3gkKVXoYc0FhselkFz)_sLuWC7rDATqMsay#joy)NIJ;tp> zb-0e97ao+(>s_#KE)VX&&)3hguR>T;4_gP})xsu&mLypu#_=^!bL`ulax;k98sVoRxYv(k-Hhs3~8&bJ{Aex&?#-I1=?4Rdj9&G z3cmlV^Us%bv9j;ceWur|f~4rU$g+ole^HV4X_X8i1=yv|R~utEH#`*(KH-^u8A{$D z9O5bQ^Rt%`9PQTNu=$aoR3$m5Lx*d@pUkhHYHzWBp#py6H^7MzgEkSNs>>Bf;5_gX zp9m2ZKQ|4YXUKMbP@J#&nrUao>PfV8nJ9zpd)xSTVi#Wgnc6dkzVk&@exL%R;-0ji=-7DZfL7_HKLFRD&TT4+a!xLzLndF+(b=X zwgi!&SgB9|mrZi8w~d@(6`H2>l{vZbYg~OF{j=Y5B6zcrxN`7_G?|Wo&JPqS_hUkp z6qPAo<$sKbC3r@{0!NA~wJj{ZOxuR<5b|EXwzV_q_r#0N-FuOzZ#3clc6QMi`{xpH z0`oYINg9&kFi9Dz;}5r!9B7glgx#W9t}5Ohzmkv1rvgk?*|8YxmQ3dRQOSYK*n{Lh zfQ>6F5J8oav84#r2S3L6rO}0P^!^a=mUc#``!jHg=9Mjr3@NB5I0YGu&MOzWnbfD_ zko9?gLsv;gbx%ZNWy;reOFi?c1`{O3*#h-vLaEf2r_LAGrXU;g^+}tlD}KuZ8%}$w z&L!vak-Mb_Ti`+k*cXdvpuLVptyZ=`S?V#*ASno@RlDFfMb1yV`WR_z@iF-8`Z5!I)xw*NLu2(-mKu6^@eIuX%_F+}c>P?AVN=&s_q?I~d+-%}zJh9Os5q zqJl6fzbRg;K;~wL=_>}&k9ca8BTxv5BI36>obenugv0(m2qf3C2};5+8+uMYVI9HJ zBZ4HkCQdV_!kw|4rlF|rX72CJzXPQEDY;GdnBb1rE-BuejEmSXvJWvBFT17YLtJS| zWv(0vo{(^J&MVY%K*K#84{+Q&pB5t@#;A#vR&{C8gqRr6F*Vbnre?3AxJcHn=MfeM z&IFK}vup?&e>LG?*6=G?o~8EX!6lNoN1k&u@*S;~XL(Oh6SxEJ&L(O*Lw1cx9 zi4S%yv=6T4q_uT3ZQTjhOY#d5h~SY;ST>2D=A<3(<%I;6qkeGJ%D7n*!hC7}?Bs1VgrCW1b53u@_9#N#RLrD*= 
zTqXBl-dnKS44T$AyCiZX%f?-M;*4~`sa)=IqylL7PPRYcW@L#~mHzYfr$UddxW&9XnV3{nq_>+{eD)91cctcS zBb(1|rI^(1y3kuZDN3>K?GG;~c6jRiYm{3)`BL>Kau-j@LaRd96Jw(tH%s;9M7QgB z#B%+15FhLC{`Ft_Qr$NS6JeOGosW4YDeXv5y9&>w70fe<fT)X1J8I$~>1aYa2P7hCuq{vc?@FZ)F0%Rc>C@>M*V507keEiM z!l<1hs8;u7g8L>?Tlj-ZK=c9hNh)V;wfqh%9-;{cGN!iTvtq^;`4B~*?V!bP0>Etl zMkX9$v^&i`AF(wx~sYE6h*5b68l;xgFw3^HUI>_!n#u9W1Ta z+s}XC)R4LWZMk(IfGa~@Au%b*h`EZVX<++mF%GeKuf}(~`kZMmO+9tEUsznaFn4TP z9P`{TxB-4az`fd+(?q{a?4sOeO^IT=Imqg71n2IW^&~iuc}{Si+yQHDi%W1i;@`n! zdm4+ozrRKr!IzXF~_~|KxTQIr2z@6mZbsIcbk@C$Y5(auQzfvv|9hxtme6QoJsS5BF zdIS}(zsG&H%0_6>f#ko=+)46rgJ$*DQbE8wo+Y8;z^Hvst*pp;kVR(W(6=O7jX%bG zKkvz3ElH^eCvZpyt0_P7Iw%ZXJp7c+C>kuPbngJi6P3S*@gXr2PEFuG=1g+|zc6w? zh8|0N5BHd9#NdT;Z+s3reCLVzrKEw)*8P~ln1779wD_mp+(2`5_M+2PvF8&Txb3ff zRV%$vaXehLiXJHW(i%$G?jR;37IwQk=8&B&B`{Zx*qeYkXxI({uAKzZCeJnDr zenYqw@624o{_ta5rx|0mH8D4EOGf;@sl}aRuk)bP-(H2v;fka_VRRrcrB zcegFg{E^n&1rDiPyzv7Td|X5|{IR)Zzg!iaJmuO>VZ?3e=-@bo)Uz3w*R6K-r>jZP z^_XdZnS6!D_uO5FA8WRy@_Si248y|OuS%AFpU^BuULX9tMg^?@JW{+U3JJLKaqVo= zDOL9mf|{mX6^iP2)Oel}#au(cN8j!4xCRLl&q6l%pMpwLXT~I_xKca>9&`nE_y@7W znS(txy?zJeONR=pmxVh|9vc=b$#4$Q@^5VE#2ndvHTFx|P*SujX7A}uLz*s-UFQXU z$mVLT``dAd#$~(>Cy^zc=^Y2VhJ6hM3H>Rvmxa(5R~xj&?wK)%(p)l967~nQe4*RN zbS8ePEq;24+g5BYLq?)@e?~$jZY}V|W7n~P4HNVX>s7`@rv zf%f=-#A_9~Jfn8Kx8t8WfA5m>2cBgA}-M$6UNU2oH>*>6BfH#YGu-Oni|mMHqS zNP>y$OZ(u6A#ITh-f1K*b zNtw9PEx&MzOd@wh;#PdDyv+P4O4%oJ7QA0Fi9e`tO;OWIh3}zR;iEw>&n+X-SLX8D znD$ovO$qwi4p~u0F!dKC&d_5U@ZLZu<=r>lC6A+pD>|ZEd_JCTeDDOKb@qV_v_8Eg z_PKvF>@B<>B@AbnknM%iHg00sSC_c1DeUBV%(FRmLD(%b?xVSUVL|)xlmSKrd1D!& zWM@;Sy1cNwk9kH&i@N0}V^Tv$#LTOJx=h-acXif0_NOG6HJPtKhh?-g(0r}q3!||Q z2aN%W@5u0wCSv8cyTKM2-+YJ7UE4{S)-FH08hvVfIzfgdscd=~Yll~~9XM{f9pOag zBfQv!PFKkrF{2a_^(86su>oaxKhlcm@>U|#MWyohMo--E{DCr=8^Xw)q&SXzo@vMD ziZ)+GN+BhGKiDKD9Suy*YxhgJd^z1V-=_jTqWG_U1bqbuHD@9vu#WS?Hsb^(XuOz` zWsI^Qf9p_d0Qq*Bt-*`(x?BrStWa}CmcE%KIS8AQSskNCi0VnQ0h~@W-O_K#Ta56z zT@{y^;iuv|MRte#%wK-$%DpBBP&IHP9enRLZ9E z;xq%J()qg0^rNHooqGd}UuQ=fVF&3WBhOkOU^R&Jh62$dBXN5JGLXvu`(P|z3pk>5 zE-X-t>89_w=FkDDW#B+@*Zcu+XN=;tJW9EF4m^4>zCp7BS%#`4?X+t38u^n;?<(tJ zI|DVPP?ax{o^%TXr|@nl(J+??9LRFz)>(eXgEQ!jtK;<&9fH*(JUYP+b>{_7;nPhQ zr%p?r?0Cd(K=_bgBJb`x&3)g<{k`oLE9b4g$1w`tm_H;AVYGSpRykN~p~y(&4=Q*T zO?z1);Vyb9t(a?oOh>%xFUC#<>|jSjPa`pZm*>%nuS7jYoEBv?T=_9F3&88!w$(vu z^+0jI2ibiuFqcm=jJ~sbkb#5s9!c)0hSHIn@AF?}d300)xs9 zRG~*3(mIaYCT89@1C_yg#7jI3g5VT1r<(@LcPaCjYQw`RE*`xL9Amkpuim$vWtAC= zVf{q*Q>mpXO678ZWqe&9TEVcmN^-o7*-Z zf+;ZeYAo5hau2+T^$?uWKZi;;-EQK2nqbze5ZTp1%r-Yj-&ZO8?R86lZn@Y#SKy6Q zQuZ8>ZhHM~*zLp?+WKTj;FZ>!OV-WM4&sb)&z|Th`A}Fmw^#uBcrc%m3k030i`QNS4&tB zjvr?^DWc?+lY7<&Dy>bf=e-m@j(#CP_VbJP*>}Qa+}+lLPi9jb>~T5p;c&jn)tBy- z23mY)J+kQ;jyAEkxcL;G)Aixn*K3QoqF)s$1lVKDM(&>|uj)t?#oUJpyv3A}uV!{I zH-Kw_Zhn5In@Ym5nyflcyB3jFDN4`2x%hSc!} zZ-v0|%-co{7%$~_TUCGMW=3pp(b&I!Pol7{S$BE<5w%U)1&4E&&a{(Omexq7zj2ub zOS{i-f(2{y`ot?8S}I+6fl9BQ_6$`bH5kYyxGfqdXBh)dbnnyMJek-_PU3^jiKJ>l zy{jS?`6r|L5r%E*H(J*lU?}i4+q&O+3nf-wEfvUp^KEMYuQF0#CyIY0fj`kOr{>o=UYd3v<1{!#+!AG?q}aat$@--R;xB#m zI>`*pZbF()rIjYOE)UzhpBX#-Gi=&DTPFJf8?tX}hW~Z{@ai@suet274K9AM^WIWm ztFLKT2QNed_#LlnVo%0I)Vx)^RL9AH26nDxUK7Y8tV*E2_U@HvVPK z^5o)TgT;d~;@waN8BzyIsoz6kUkZz5RNw} z-u+H(%NSKcyLrI3ap7kxgxmV%_f`_(~a$J@awAn2V}S&tf~la<5yY}P8U-BiGY z^@UebtYU{-5D`-E^O)??6QvUU+F>T^!SyNR!jNfAoGFN}xllq|<(vx5C z!U712Y**N=S7{<$$oDyZ@h^c7oTjEk#;|~wQn+@$oHSU5-%b?y1ws?3Iz5vt+KV7SMF(QPbRE)wjwBsAJXutkvHjR_eB#N zw0qU@*khri&s#^z#!F@=D)KLrd9SlFXs6^(4~L5hSoha1qEqC7l{4}qK-s7cXZF4g zC;I^N!4$pmTqVB-n0BW2I=(J77&}%NFXHtU-!Pwyhc@LQ9vo;>0c<{s<-g|3W#C&q zs;;BCBt7A;3+HasP`q5v=fBJuTtz0pld~D^P#-8QDm(!v8Z%n225cE~8&L^Jsi28g 
z&KI$C&L|&L`ytzSzvCEPG{hUB-4Sx}ri6ki5gRgAFd`LVXaNj`k&bYABUtDUHXWX+ zS}sScOM3NDtN@wr)h3CkZu%PXh7Qt?xnQww&UEFALKHKod^tbY!$X%~om3oG1&nu8 z&oVa?d3m0v!~hs>QWbD6mRK;6*jZ?%|KZoSW7AR#-j!#YA9XR$i8qSB*|BS`Si2ET zP0ELh!jZtZ!y+s6{G+KJD?8x(61Daw8M93JL~w-1>o0W`ZiG0$6t}^yFfyFb7{8YI zU~WZb7uF6AFiA>Q%#PHPn{WC~JjlPX=Azo~W~$&RE6>0se}LJ;wS>cPB*!>dVfCti zkeP_+c~VqoT6y+xS-slpy(9RC;byyOc%O4Z7v>PYca~{e4ytdam5H3mx}%S&02b#L zF;0dQ5@zI{nYc*Krf+niq6<-BvLzAUhyZ*JRJu*Z1s^wc9 z06IN)F1|h+p)#X%u2O%`e5G|UZf)RzQNJA9aEm~GVcwa^bXgfxGgzSo6$Pv+gYrrV zKh6U_p=c|Lk@6KQy%=}t5jiv7L2p+zA?VG{J#l?&Fg1lw}gc9q7$?g97h61B%~U%E+U zVpC0&>@^WXW8PaLSJ0KkQwJM>-I{GHY>FgPM`EbaX#To z%mZ(CjS|!<=#EU)32qW;9gY;W1^$%W(UkAxdqn2`35K${R0PJJq(eZ#ujieK8WV^W z=T|!yV_N%Ve($(>37az=M;(}td>>LTzssL%eq60eJkrS>I8%3%2{Y1vpkci{q73T3 zInndA4|&j|&7ou+JWf*~E(xsAcSmb<_*m^JDXts9=R;mQoaMOQO9fb?6$q1hSNnM( z8(?$KR9P601KOM%S!F6LXzTtcCCa#-lz2;+J#8Rgi+Bsm=SBrI74~Km-TBH8_MLng_>Jf9H?Q!i%zXUXY~DKB@b>B(;Zt~z3+2}R9Hs5i(h@Rimz{|U z(1lO*)Hr5aKJ%2ROj5q0Pn2&EK1?g-md$*0f4<@58Ii;4G96AW*HPjpa)c54thJwK@>V~ANNKAG>J;m(!4}>j(3h;^60xCO>mhrH7SmtDBA018AF4y<# z-uS&hw^JmQ>*^5=fxd^-LhCf+5xFfntM}MY@hd8~N%p%v?QJbofL&8`!|%uGlueXz za2`V5RrBe0?@x-dnyB3!W|rp0hRJbJ9HBg*>%H$lvbX(Ofbfco$6t z^a?Ial;#I!>(6xNe=kDVMG*`-hYP!NSroTQLXfgK4%{``>ITY%gOcfy6AY&ldwV4u z9p0gICGdnQ_BeYj0Rsp1%)m$tC%dy+IM94eI*LsMa1_6MC8@MBI>cpPC3%7Ea#!*( zZYL^KNpe4QuqYFGAj^F9=}K{RXuI2I+f%s8Lg(B)2FPf`u`3T_c!1=FnJJ&p^sUpg zk^-xJ(=BgEGeVRbmRlWBy_dXS)M(i8Y%W>?eQnOP{Ue{?sbk||NKo5XDH0BKo&T^( zCXksW*nk*T$i)F1$eSeP=YGbdq;=Nln6?Y_xCw7)r#miNd~@A~WD=U0T1$jvt#lTG z3}96xQHoY;%>MM6hb*1U5*5HIDfPpyk^f1eoFk zV*X-T%vKq$sWTkzhx#4a@l~GB~k(BKE*5z+tQig+)G%P zb>8%O^gmCPzxspOTkVa=bgk3ea@pUH0dnT!C!8g|-Z{P5QZSK;3KnPxP9W~l&kSC#1)@1L2O)d8@|Ptk)z75!$#Ojv zaNWV+COUDnp}IYxTs;=9Cw8wb0RRG`G)5b%AyP-g}f3-=KcM@@QcfBQ;zJ zI?<9tft1Owm4sO(&x*#cUFY>!t#;aXv(*eoX50Tr^1UIi#~+5`FM7NIE9{&^@j;8I zfMQ!m6@~bnd|B&nxOlbTf_zEOQ67)qUXom$Lzl}}h{Ztq4Jpf+t>=3E)~Y#>^}2-b zv?{6?2ND>sqv0^96?-#!qS+L$3tWte)gWfs`JvkkrRW8jeW`#Tr|?o*)59gvIbQf0mHy8UGRCn{u3puZoatt z=%skh+4pah6l^S^hn}Xb3sk_&cLO_#QoG+RH8?pt!C!kan|0f5{RcHC;5AFL>Z$kv!(_HVORa7U5e_}-BE%+BIj?@L%~n? 
zFOXUJV^jvv+eukogz3LDz}MizexP}VbAN^>61#w9aWMq}9f{jlR>j$DO!qLAWKDw6 ztVl<1GTdm0Bufd zAGp2!j5%;PO8_9DvqM}9zYHqiW*hc|YaUwG$0BSJ-k4NNys;9z9dKFDH&L79pSNz1 z!FsD3^JDTrmLoO=)O-laCRA_wR@J*@?@w|6qathPtn7a5L=fkp9CVV$|c3oL>Cns#Q)iS>UE=%Dk;|`x$J26m#&>o99T<)M#SFw_DuKZj<3=D zihap+)0JrKIGQ*SLAiF9kvo@il*v;k;=*{X?j3W#ZX!O|d^MZoewHOAf++Y*zuS?JAdbe;2V`{d~?tpU|PY0!tTMs(^Y2VH6$=1uegk3+=Z5B}@eb`epk{p6=?n22Yr(Mx88&f(8>r8=G!5e*B+z`r3pA zP3Uj*thD~~Qe!IM%VSo;kcM#e>8>u2$cGPePW}C)jV+oeHv(&GAr{^FBOD<;t)(P@m|H-Ymd${ WZr4)FnmVWHfKjn=|GE}+{C@%2J?EYP literal 0 HcmV?d00001 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/pipeline.JPEG b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/image/pipeline.JPEG new file mode 100644 index 0000000000000000000000000000000000000000..aa7df13b11150ec1428ce37955e3097e66d3eb4a GIT binary patch literal 19054 zcmbTdWmH>F)IORPiWf?8O^Z9li>5$<;-wUqphbea1ZZ)mK!Fx5?xYj~MT5H(cemmW z0Ydoa_r7c0`{jPP_fFQCm8>&4+5607?`J>roQK(mRlqY9MP)_6qelS1Bg_l%umF$; zJbv_F`>%ieU&F@wuYZD#jfIW#1PAAT4qSXZ99(={92`7CJbZ%x8s;4lApy~UAO5?@ z|9meF##TvKEfh>^w0$WV(fhKKjvYK{onTJ zF&6d{99)bUgqQ*FXBhLburNkH!B~tL9e_Cxz$Sh2{H1^_4w;rYE|V*{;FrW)JZ8D- zc8U*UCoDo1Zo&8jlvLC-w5)9G9GqOjBBEli#UyndHDr}Ma3nhWxs1`;dS+Y8X7w~ySjUN z`}zmQCnl$+XJ+T-*VZ>Sx3+iw?e3kPonKsDT_bO9|MTk+0PBCuU z;Qr^=qsQKu1B(><$x8v8=dxP3=B{K+f?x2+#(A9$_XAixdC?+@oM4{7CWN$M1jNM>aFK77gTCAz_~^ zu{|FEQJ)HX&tZuOha8~K%(^p*f_weAQvh|HvghO9eP~UY!y2E$I>}{W zTc8(u^Tbg2Z1xACGp;xiu|CrVoFBYuz0VX0qCrZod(~8QuHH4Ze7IJg1RDJr(lvCr z)_%wLd|s7I>`G}_@*8`v{{b|-&^G#j+q9J^;CXaeEWj&_v{FkvLLh%6*?Bi3uCe|AR3I)UI zv)R%~$Wg+llr3vdEjz{Z8ac!FHGZ`=fNKr+C5*i_fj_J)ZHL}d8tA^;=8jvDzW+LX zh9bJzH|)=tUY3{^{$Zp$;S?Q^q`wt4u0}memLzad2){3Z2k;xGbx&st2i2Kw6*WML zc$7DHPB%_qsZx@wAPS%SwV5a>Z>la6t^p8nK&(t$>3$J|{KvbuMWBn<#Ol3iac5?6 zk+M4?o&9T~lTg{BU5Wa5-eyS~f3Ip2h+$i!zY#0@4{Ia+qVFYpZ(W=GILqU;BPum# zr)%?W^{t{zLzb{zz*M@UQqM@Q573Uyk_&X>xH;0Ti>Qtq3;6#`^SArtvWY2xf@IdrOJm$ z@JFHy^5&6x?4i6S__lvTMbi?SKl#4o)A<_a7JZ|=iV7jPDH+#t z_DcAbzmA|-+jAfHSVxt1^S51wBPVjJ2kj~6*x8CSWBf!0^C}N>2h8Fh0EVXzfX|2% zjYG{Y%ny*XlZ1ikWW@i~X`X8Kpr8Mqg3{!!EG0bCw{4v2Mx^+$Bn?+s+Q75L6V=Aw z<@x-`Z8wvu*+3lho>Hy8Iq4~7?*`}73Wp~|YOe_2rbV~1E$m5{Ch;8W&MRBh*NnZ)wzlGV05ChLx%^J7 zy%YYYn8K>TzC*n@NcP>bII7~9vu;J*koCkv@75{D`DeS8ywO7F)27d|yE%sc&^=eB zA;Q__?bceEdJHA#?m$V+c<^%>1_PAm2_oVmC_Ip?`V<%`rCPP$nAM#;ySTWk8^ywl zlU^@aqh4wi!AZr!$vFR9GBcZYnq>47t{)qSvnJ>yt z?00_>1+UBCDwnQ4w>h(T0H{Xn&`1Ul$!I4&0H`Qb(@pE7sa=D}CQ9`;pFTF(kN)5L)7L*gK0PO#%IvkDb;0CP5SrfYmI_3MlU6 zpDS0oFV*YHnQx_;`t&13o)=!um=1h4*N4YyRfQ} zb*G9loNT4_+21&<4>tdG;)JMF@Ges-QL{M|f%&pwdwXu#HB&)}PCI>Bso5@&W&G#KPW%$+hEv6wQ!I@s z-k(#{QW}LH*tJlqe*+-x!p@&(=4aXwQ6TbKV9dPJ?`7;shr=WWNv+Al&Bprl-4{m4 z{67;@pX%N2nMl_4FhqW>zy}N09BVkYmY*Vtoo<S{HYXe9d(E*H+gwv74#jHLC@=?og*owr+jj~2JD7~F@jDb+Jd6^-5OUy zn)O@@?Wgb3J)+lf1o}wtsy0~|H?l8>qX%~)^0o`zMRZ%lhWrByPj%_&Uh`F&Fe$?0 zOmO8iSw8)F>fEh|=JHpUJ8F+r`R!{P>Gp2tvvirGcgO7k>?ypKZ-t-p$_sU?>w9t> zE8O&Y>vvs}^0OSiSKoV%zhy&4qgCClqr03xaiioWd4zpwbo9)0BPn|sQ|y6}uE?s6 zrynE>6vGvwKQ4>Av~Z`uPqGW$z1^KX$@gEH%zCa8sPnhg(|&Yj%^xx~TH{pDbP-5Y z?nw6L-%o~T4IdLj-=xoctWViYl-ed3ZNj1(_Uk>cE}L5kTwcNB)5gH78WkX)BFd)Y(n;9D(XW$-;Bc!SxX~9Y}Iv`%Gx0Hpuo^aT5M;O%prWTCk1jd7G zK_NLc(hq=Wk1S37>>bTNag_ejawFRled+j*RgyStH&fXF`LRhAGP^IpY9pT3P6SoaW0GLoBD`l`KZ;xfPGiEe0s2-)fwxrYn z{(Tj-b_kY_>G-9-F3qi!RRxc1mNaLG&!CuJG*QL+8O74AmTxT`haTS^M{kox)U{u2)SrRRTO@{iJ8x3HD#LEuT(Z zETAVO)LZ@pFDO4>by17!4D@7}IpikJaeP55*rXX;><=ww>}l0C*b-?1M=iZxd)Xud zYP`<~vfVz}iES3jMfdrcd^6=~s!v^BwX4w-i}*m3u0@<9d`-ggZmqdqu(smgPs$Pf zn`umh$KJ<4fef4479_ap74G%}K*>0}VG@^%uJg|?Ew#hm2);wYz 
z6tM%J?8#X-O7=_Jd7t4_z+TsI_fe}qL)>wLP6e++D)J^B81evcOu#d;qva0?1lSpV z%E;dj6qphY;G6E)%Bz=Q9y=6{>3fuPnw1y6Jir#>PGvw?oABzT(l`~Uwg}EzbJZJe(%bM6T;*2QmdMRA927CO+t`)ZWXMnX84BIgSH z*BR<~0@FXaT#nn293*@I)a^a9je9n6;LIF<&sz}2TwnjWQJ(&g9Iw?IJM;K4iuk?| zDo_p;`-A*)Ct&1L^9~7K8#Yq^TPtm;rc~GJ-{5?dmww46*z59ZX~j2N|1I%X9$#HH ztxGKQU&a@I!k9%xBZr5a#DX#R*>Uvn(YlwLRcolD8U@x1v~K}29%6-neRnrA9Y*8s zR3cDhHXBo2t!_}zv=MGe7zkN*AoCImM4qpL!%eZqTG^mv;dh-60OF&g;kfuhiY~1^ zh_^@m^G=TwcML#g$MtAiZ$J%!VwR#6Rmzu+)0zn*%BH&f>~xy9wBEoNH;i| zy-$Gc%ghno%RG}(>JXeG@ z%elapLG3DmAgBh?+W!R6jd%nF+T543MX|NOcBl2CO6)ZY<_2v(S=l*lhQE6GBZNiZ zZR{IKS&GdE01nFaRC0BvyAREW6l+Uu%#Tr*8`)Q#8ds`ou4(<9J#aJ+X4Bs!WNqo{ zGZa%YnbZC7h~ByieoAGWu76-=l#Fd*(465S2P>6^$ak!eIq_anm>3FSal10v(Co(2 zZ+_@p|8?{swyv72@PnW_GX~s-(ynyhbFU&|pqy(fv#K3b^GaO+# zLnCg8IX%bt<8fctRCjv+?_nRhR{bpgd_(TLOMNgHlpyyWO3b&~*@J-9ki$47yTyCj z-OoE7#YGckdWr1J4BM}BN;n$$26bi$XA@`Md`oa|nm|cu+&PkIW&qE|m}$V{rK#^1WIAo(1d= zdQzP;>=jbh+8&Dl0Z6j<&j;DVcy$aYWIqTcB6W>17erw%JgtkdXLa-TY%|>SL_{`D zO_rszN1BCmthbXX5fR5@aX1YpA;I zmw0j1o4=A{Zv*BJK~|X3fbrP`!X6ghSecRQ?;M#fTzc;28t{l>XBtdh+oUc^4eJp2 zH2BE9cg-Z?Apf+f-5XTL-^t51e|()5M0)XVN-T*Y{pw}x^-cASo&rvm{{EEbxgrJU zY6HW1>rwFq10iLYeyp_;`JTgz$Zuqs9NSso!fa~2of#Cpt3cg-ER>&*n51jr%`67i zzt~5o+!BIwa!y=Bb3v9c{G>;{U3aphnm0ImxH$vO&ASWS<^KkYGSc6nR2wD@$Lb%`8VW-jm`07{k^^fd+VXs}cx)`2Fd}e2di^A~n zy`w$xB6orft}oTm^+Bb*&e+L3rQtW;oD?`2KIuQ6D{*}dp^Ms=b8i_bUH#X&Hknny z)|w>Cu<(hPZU^O4!bnjk?@GTFO^<4OZrjq@`l~*Ejp?I-)%?M`d;8SFvYwy~vqQ1lonB?Dd@#yGo?5Kk99( z^P47Y`VdE~iL^Ave27pxw^G>TXg5#L8a7uwk?+cRCiSruI3^{$zIV1m%Fp_ZQP6#1 zapvzz4XmSFlN6%0xUc8%@|+-Y<8BEu3-!Evg)Hv)T^xlbMgCNBe@V#mygL{m-X?M> zI8-`e40GS-wE8GOo}$c~+L%KFHM&m<jtEf>LdU~rMu2cRv#BU>{Qct=} z420Iyz+UY1XSPV&dwm-bk8@i0qMPg@{Sn%j?JSYICe&f>u#fy2JmPb+v@=c^lS++I zL2@rqXsDUwx%Tjr{hyRWM?+m)5r?Lor}draXD76Ft@wX8I%O^L*37YZmN#dZA#1~A z`V&(7F)X1)cs^Tzb1-XkDSM>TEs!%~)5B#hPUThlH5mNX7k(HID;}mO`?o3e+f6H8 zK|if)CY!MV5+KJ(MNMZQG&pEt9S{z)dJ|{fH5Y(gheE7M+!24goy3uKwp_M7H?d=>2>tA0>$yQup}{#&z>v;i+bWtmeqp$@=RPp> zifV-2_IkT&aNU{j-*=Ipq~A;9GLj9h8=uzFMm3nQS!emq(JLh$cWceS*2lZVy#1sS zZw6d?WZpA|*A+{6n&egRFT=flz}y+jOH`6Nz45h@sD^gZFCGB1mddAOfr~&K{l)ez zg;UGUAK0jcAUJOv^k`ltzjUdaF{|PrNs$g~fIKrccBUbyUE2IqCg#o!#aJ^5t6Tw5 zN{PZ+8W3JXHAWveeIzhL0{o+(k8ih3(AW zG3ujzWo$0CQynD+)q{S`YBc@G*gkj-8)6l$lh!xOrSOYC*gEo{%ji`O`x{5$rZXaR zD8I1SE4VF^@QcE-i#2P#K3Xw_D5%+b!>%-U(jpb~%ftYG?hQAGgJWBXd-7uccu{t! 
zDEjfF)0kQn4dIbPy_Oo7Z8b*(+qvItNjwQm$gVfp@4uD0I>A{Bu}RN|*YoO4=wc;~9eo)pY61)JAFM?veV4y>A&WPS=Zp4Q2%+ zYy-(20PQ@8PauFg!_sIlsP@S5BW3E%8NJ^(dcB;$1;mLa;>qZmiKaf%WUWaR`_IHy zyiaBRJW&`vHA9o$#NRc;$p?V%hu@9=;KH5joMTnrbWo!M33K(jF^cI!CL`FIFZI6d zxFuuam4r^*aBFL^_UY0<$DFN+B@rT#sa5?Xyv27dz+3yId&o-4&77h!pW6(R+iyqP zXjei|0Vh}U_@b-_cAcp^Urcqyh<0^B+5@iS`GXpUi`hmi)OLX{xSh)l_jIXf82zmo zlqQPzO^r^$J&kX{&m3PiG})19vbm2ao=y}uRPC%6qC?iePic5XbX7x$JFr}er~j;k z0IMGW-J0{oowTC~GV}|qlYW-JNyMX@EAm|z;#;`0v!56Gn7+)~9&3{=Qx-;zzCKmh z>W#iMfGSB}5RDIr1}j^G<@LQ=U(Vsq9{G=rE7f$tP`D4 zFIsr2tQg$5ft_isEC_hgQ*XuK0ZV2M+jyPdJ|Y`4of4kDpGF6`^}2K`c33O2VQ#99 z5bY^ee|;tvVqo#vu(J8uok7(H8p}iz^0ZA$*mT8iBSY&CLGaCfQ+}Bki*8l^8j|m7 zw9p=hW0S_EbDOP3gH+<5cBtoA4X}G!>qx2gof>DV`6N1}uh&zw*mGM+n6i}a`LflW z5-`k5hG}9XrLgwRg~q)^uG4*@KJ^5WIR8dUnTY*>OH|^4H8n7 zLUr@!-I!aKAq{+Qq6i6dVV)hxP&vAi-WLVa&-#qtS#N*e$5dg1azDSsM?@!`ufFw4 z=a{&RRUC3}P=4*KQiXmz4dpHB;gS9Gk(F+c{(QW#wD-x#H`8LG)*6aI-lv4BL3nlf z1{S>2+c`$jTMXn>=Y$p5UOz!*+4bIc!uBr+ z5Hg_EjqM*G3)oX(Vx5GprZ=zr9{@HhGbvfMzZ>fhhB-yciUPU1y-Svs#Ad-(pVWlG zah_M{Z0Ym7!|2=!rER%9ky-+=zAhUdnYrFI_9fFtWBDK~}y*H_+Mh&H0*W@ceq|5y7+L)Ia4B3eV#hD@VS^+H%m(wQiuCuei; zN8mZpGwE^_M;UH>k~FWQ;}<$##4Si30H!qt7vGbF$SpWFSWW~?m+lCll4Gr}(PPW> zCyTna(<}A{{4`%l>x7zeEKTFF2qmWS37y9823@uEqAu%E&wJ7MrS7o%j15H3Jv|}b zqI$lap506~tBYT3e9En3(Z8_;rfzFzdabv)TF=bPh+j!_rG4h9^r&Rw<>SczSoh}Z z!Fw6437~AZ!;(gcXb-M2rO0g+6|T|~r8(`{t#1uk1HaWb7txyzd@?SQGa{!DwYrv* zk~qu$I~8Z`zooB1BlSyae%}qONJeqWp=$MBCoP?d4IEl&vslS`5KuCiqd`@~dCc7> zq?~hjQ0-3i#cOj&Q1$+K#b=T=-bB=X(qA+evZ>>|bixx?XOE=5FhC1a3}+y%=EV*b z%6yUtS>vCY;uU9oGovAh&4E#zu6LE1CAP9yx!@c@Ho5OotTh{M6(pb+oF64Bo!4Pe zXwG5hwU0K>)nA>1ft}fq%NaD@&Z_mvOgp}=#c zJ$N=*aI_--?Y;`bRA&H*g6tsWaef_mwLzrVW@)GF{d^MmxN-seXZu%$_X#q5=%GL^ z^x$b-G;{4&GUosy@o?9<1T*!(Bjf!j$VQaM9Qotaa%RB?01Z2E6f7<@aw$9DeGC;Z>{coto0?izu9m+!L;ilRv}Oz~_k#eJ8i5$Bn@x_=Un@=f&} z)K@A!IG=3Z8uk)}i!*&iwKvYRpqGIs8bI>E!9bw^W#I;XsJQn_2(N|=G}}i>24Q%z zFFdyV(#HQTnbtKvg5qXc2p^1Vl$yH~7HWKCt(j{trmITa@H+jY`({6osQk!Icvw2F zODmvCIs281wRpFqRBGxKUR_m^)Xy7@2r2I+eYR6d%9?R!Heb2>Gh*%^WofnXyIii* zYgOfTS2=dRfm6aq?hd{m84-qqb*<6o>R|>f;R@eV%-cM#OHsogdgfmj*Dsr#BD`F) zk%xwfb_8|tB6?B0>)Qg`fKffXiP_OqZD^(4wre)uufII3=(!{-WxfUuZ7E>dt)BVa zbL6N}#?>~G+m7KEh5@B)mZ*N6K>t)@fg+|vwO}!8L!NHzb7*3w*>(%n75B+{>L>#J_wy0fzR7oAIT4F=3|Z^zd4j&502LuM%;nL2#UI!g>AebJ zQzLI+BM$&1CPV~TzzDcUEI}u=j-n3$_ygei(%X#-*x3I9x76p#Ad4Kl8%P#{{NTJ- z6Pdcld^?ktS#UPc8f&k@hsmUu+b!liIaZ;;akaRPvlOQLMpI67o)_Fqin;RlcAj8z zk=o(|QMNZm&n4+M_O+xB&f8c7jaC^gw87uiOD@=dD5v}BOx4ino_f8+BFLZps5fY1 zj;*)Qro}h)$YAdc{g>YF4tc~oHBX8+-t}E zxU*h;`{wA@p_y;|ub$HJWzxFR(1(yPimMzHsnOa}7x=kn0>dI#{j8%!40B#>+?QJ{ zFi~fbXk!m~yJn_NCh5;r z>pjwLl3!28@BDi!=4QufS=!T`n@Hss|J}SDwQuU*m!d1KpCkML@Q>c+IbKhdOu$`Q zw5encEtyWnW7?!=qA;V$=YEO|0y(%)@u$SF|sC z=0`M_sNqa_arAhQfGN(0J|%kBmFU40GDW?ir6j=uzZvG!w6*Q(bxmTGW$aUio|y91 zC-4LQYu5$bV3G-uw5H)ttb7`FB85LrVJHkM>L0VtA0o^j`D*qiRQ?Jw925RT@KHN# za0`jQ9^*_Qomqk~i+9Kf6W^GixSd(oB!{YbPNMMwC%ko|ij9rld8i@}@dP#FWdfGa zDW{byolAy0-P}Lf(t98oIc&=u@8f12ysu!)(42&JTro($>?jN? 
zy>p)pYh{J1!IQO4ZcM0@V%3Fz$1}=a>)p=m>3un5Hc?sYGGlFw%+sL;Jc>R&{t`%n zbiLz%GSxU!?+Mr?@|LNWm>35?O6jGGq_-??@dA^8Y>&eXD}Q8k-ge#ACab6vV^HL)NImTScKX(@?A`!~B4{VN|fv;#W?twyWVF5Q5Z0-M>vnWKy_NWy;IY zw56%%{*B_x@>^!AJhV~d_BSa3#kSK$V(wEhG(pJftQ6MW2T1B}VX|GxH$+&9i@*ya zS7Z{1;Kr1zv!{rWf(fpqz}>d(jtoQEIhm2h-=I9F+5N~ zI;bY(=?69~EIltW8spdZc8kS*K`!0o0XST zrI21Dq;JyXx9_seU}AgO^O3*9teZ1}%&Yisy--*;t3lJ zyWAl`?4d%xoBy3&3A!uB-S^HJN>|sMeC-0&{A=>8w>HTW&+pviwe_$mWBK`8t|;&l zsMr?0x&h~9+OZuqc2@;g(&vJ6Kqszkq3E7Ka>x%aCa7lZCw2r%6$8d-5mJcN%}`&2 z#R|SbnX?_5%YX;+0B|{!Ypys(D|iM_Pi%REo_jKgssCw69a$AONcdhPI{)}?vffWO ztVniS-k$EISA$bC0o>Qd%egS(V4&^id+lFsB}zG>QJcndA&l>cak+#2X11=Q{o_Bl z{Zp`K&e1afcyJH%!}(Y z6KVdU;@h+&RLbJu-gxZ89uD*Op_0Rbg97?(YNEKZ|3xAtX#o+$>|OWCLh^Q;&b;u` zY&lN)na!&xSbv$3KpG3 zbrAkp#6Rq%(`S6ccppStbAm7#Z+^KEV7bO0y>PJRwAnOns9o>j+^?1Whh^%R(uW4h z4U4nXNJ$^rv^f;r{ga=#NW6S`%dnvMWSFk|)o2M`Xms1*h zW8r8p)cEfb=UG)pts6GCXsG5{}OgJ~xm8$-kbR zwOp59xr!9w+AI*W1n<5nk5lP&5Yn#zU^`0bXkGDpj zwk*R-{yhLrU(V99J*meeIYvZ^PjuY)H1}pDw1x=3X&HVLZ^wLuAn9mEWtfmd09!D62+UK3$A;%>Bvdurb)}F_QWpTyNHtz@N*0$ z;D!=|IG;4N`z`X;A6zHD(XV?Y%8B;e*!kE}^r(D6b|OLe8YJNPlxx|iySt*Fjwj5k zdhdytW`bJ%*Tt6Wfr6us6YkfBdt^(#uF2=UUqjE6^$w-@DRYXHpKKMJEe(LYA`3oC zs|Pxia+Diil*(xt68*`Z_F+w3fL5n6T0YwSd(WBnVc!1g=mFM!VX+O7@y2J7$mfe`|=g_NPD= z{!cYVuUe42C8h;QyuICy*f7hHLVNf4saD2}tFcRPIxp+MQo`gtQ@{{4@V znY1jUJHE=hS0LhrkEc%uNq%bvuab>A=*i}*a_#;$s_iH16JiNHTT$Uby~s94r0)#+ zKC(m{C1_5N59&J|y-Z(#@^mOXnkf7b8rI#6dbJK5%J9OdzZt&hfbd{LE=EX#^TAD#!lJAR@Wgcw1!1sA| zg>=G))ic=RzZaX+k7DQZ6biSrT^Gw*#J3SjL93x?K~?9-R+f`>D|uIoiXUh+Ooe<5WKJ(dE`eOJ1P=h$x{szw=A{iP zm?isDdL&?S+izF4(4V)|@hRglLgt=@gpNVD*Bgg!%r7Wjy(LXBM&b8@vT}Lea26)ZJD<= zO&1i5YIGwy|7}m@r5{G5k@4Yy<+v07Le&E{K(sNAg6*`gHKr189Vw>m{etZoU(70+ zRY!6x{B2>1LaCO|BP~1K_Uo(pQ?PjqSwTYd9xz6vGbXP}Aj39xp-;SR0t!vr)|lh> zn5z4fu9f$=r%>_?*`ebwxzS)u&;r%Ci*Bd z`(?xc`(0H<@dH2>R(XKCAf>{p zua)S7oee)vM_;bwN7zhBy$-^}HvdWd6CUB!R9#QkZ;pTtuA*Q8N zw9zohiJIk$*vPvHAl}lckpC zLWWk>c?#?O#A18)TfLMlr3{|3=I`U%ULabtiI+{ySt9Bt>P>B?c-4K(v4P(fDph4C zDtT+OXxlqeaqN_jr_1p89h_g(P5$lk!q*!By1r?DddUom)0z}{SI}hZkoeY;hvqyp z9W^*7!aK~2r(oZIsqPSSbDTXa>)9hPfd#zx6K>kilA{s6`nH;oNbY@Ima1D(N5${& zQq|>>pM<|_C_$@fCSdpMoc`&-VDfm2F(a{lgCZd$V`3}TBwkQ6M4}Tp5JZlQ=n}+s zk!q5Zk$4n0H@XWp3Tmfh0#orL!)=pYi+Vr8F_q~DP?L=Ek@%*@Np8j`*+ z>Rz8m4$#WK(O2NvoU6eT#DUO8IU+KU;{>vIHAA_qK0c>{eNwfmTGGq@xb^yp>1zWH z_F_xg+C{j{q4Sqkidi2&G_SVCl!$J+WI*2hdO)92vDEZPL#b_Jtu>rZ58hly2W=Cq z1V4ph@=ZtrXf=mXXmWLoY1(t2aOdmK`lj*OXRd$g&u{c<qwy_>!%NWMGVgK+6b`^4Od`P1oi=yuxaiXzk>$n`4%Vc` zA(`|SRiTFseTNpU)foo?jx%-7IA!Lr$p)*=c6hmabb##0^{kS5>Ucs7MjO7_?QyH$ukRLcIh)s2UZ z>YBeeR!i?UfN2QD#slC)D~@pieE(Gn;v96Aw`+Zd>(cfBc*VU~%dPOMj&uneti%h) z8=aVL9=)2I4rJV2sp*aOIHvMR8o_QE-;C!dHY;3kdV?ixXGxn%>Ae@GLi}M8MfSUP z;yAY{ep_wrO&y)0CPlZsRt)o1q{rV;dLBF}0W*KSp!IU*siFt9v>99 z=$1!D|NQn<7w}2s1!SpH%$?==Poa?G;T%b5VSd}7 zv3#uK2II0!ugXQvvvYcp%!ayZ#TA2WUyRFC&wQ8l<8Vi5uyQwk!gTM$rBo|0?7e0O zFY$z$i#0Oqr?M;j3sGFj{P~2)w&6our#y!_M&0%Cu|Rv%i9^tI})EB zZp1>u>f*dtj4S=ldU1qhQ{wkSg87tRt6q#LS=6wK?u$m$m_UX`?h5Et zxK^ys385Obgl9XrcarZx;~0U&*_J0b@jWu3YcPmZy>vz6H)WKQjLCtE%0-X;5_Dx)6SJWX?no*}Dwy_}4M z++CzjrD_*v(Ml&z>0O=x^Gb+Fp zG%Xw4*)~+9U$DF8^X&eu-T|y*U%zO~EFR(p2z}-r>pG9%2qd;6{E42LS6KP>1pe2K zEHO4r90P;HBtAEdA6mt7J|##ozNCf`ia1U;+}hxES75KVPBfmqXqt%#Z#I95`X$7r zd@hvv<#N>T8cd9G9~XK0LgBum37Xq;dP94zNr5yRG&N_OQp@GGo4CG%36+tkMqtwH#o#z~W`G`}#maC^IGk zhQhjBc|s)36c>jAUc<7`m}J>*c(G9K9kZ4boOYC=+SD@$biq|YGAo}RPFD-iMm&C%2-M;?*V;rt7=V$Ol;SFwBJ>NU7m80~0qMBtIBS!DsLP|dYfP|WG)<(95G)`Q|T{p)H?_+NU8 zH3+!-ioId;vO@Y5Nu$hs7O4st4PTgGkM^hG(!B}L_X<45jxva0T>@WeB;VDqzE9Fd 
zi#kRD7C2GtAFPR9>5PD~+(m|4Ml-Y7V%ZL>^DE{R){Q8fh1DU<(1BiT#@VY1)dZaV z>DzBn#f#tANWODFW}%ZSy?ta@ba+&mHceWfC*=#w8AEfn!9DM-rAG=+|?+MQ+XV~3ArTu&VAmACRX(Z&(?RS z(r*z$wQDztcF?Tp2^6eG^Z}_k$YeZJ@4{_)pZtLdL)xuvmK0t zm^MuC3HLueh%Ey}DZX=>>Qq1dbk}jFRA6dwb8^$^v3fQp(7R)#cRQRl6wZ-(=n(bn zT4aY&Ea#5t6TGGNoEdr}gmV{MTZny|!tXhIQORyJ!3(v?N7jui%-OiTYLCF{rJvaq zJGfgf@suW*T2FiN_D?&kOOqUBehLh!vlX?w`>~~`th~vUiIKnsW}`cRv@cFILoPCM z>K;2W#R`T@#OnRXSS!|`Ke!7d9kOe+=RLd+j95(7PcdCvc)?C1Gj;O=gXo-2Hs@`6 z%j1gZ5|Y%8WVBCT@F3i=aWP3piFf>27a~x8fXEjH=-)J3U1&4+1SCYwVRBjHjH23(A-?p9Nk^a|%}~X=#!gw5C(4 z+P%(99+>t>=N-O ztSfce`!=rDfdywvZYTP-amlJ~>d4dodzbA%MFHc~DLaa6|Kr3FJWIWGijMjyjag>u z3SD11yNvYji+L>TBo0)$v7g-1kct8pZ^FrPaiB|gpL|khbWd9Lm0a*i>XYMBPP0Y| z#!Yz(hbO*guD)=JTZeo{cmr$(JV_`JaWr4_Ni*Y z-m^cV;Zq;O7Tu?2?hs+7(bVd>FH?f+bVTpFFRG}tO%m`wcl z;rQp~TdI*$Xf$7x>vGf!5PEV~R11G<%3TgsF7BaVS|AUr&haaP`P|AzJ4(OhiZ*?J zbCclblI;8;U$*`P%@KA8s^xiRS3k@p02Qhdb#H?i+=oKhON;{NatL5nla;@6$c0G~ zXTyV#-{-G-4bLj2jOGvFsn?`6bxY-odd28Y%9H(-da;fO)l!7(sn{C}nm?dZ0eGSM zxdrncLP&@9eaJ-az2sZ(w-rh{QYLKz+o7XtGq-&5mg~_k7fcuoC2L#=UbVMXdF$SHm!#AGdJ%!WY0+3(9@WG)N{UP_ zSg^kxlJEnszm}!@Ml#Tf5eEf3qLh>0HO6$0~QkaJj@#=fBgsxY6UAgj6v&Tl3yAnuEp?ubEDN$>D{@TE66mym*i0JLs9T8gR zJOV{g*6sld{;+Q!8`$qkmedmd4h_d>HEJBjz(}sS<<(CLx-SB_lQ9;=K}9s_FdO1L z_0gif7QgM7=-iusF5Po)os>>=6$EBvC~6XNb1wlC#7qU^&${*{GIL+(&Jo$WsB7nF z{W*i~`wF2{@DEKP1$%y3M`h;I7C!lL{g35C_s1LcQEw`J>1{WN1;WHBE_V&-?=}1F z)!A7Z8RF8qn<~ZBL={CWYmc$rxW8p|&3Jn!l(iOFzLgIDU|X=6Xe}>YiPwp+2t%?KN2Q^kGgynLh7&doX|wBs;sQ`o z3jS`;N?~U$To9k405}`;8wYNlXKa^#d8TAvuf^5C`8j|X*YI{m{=T%CVRcCxO**)5 zqPS;H%Ew!gdH6=0Js!8k2X21&b4B~eP0Ep4D>}J)4OsWK?slh(ast*BLmm4XV9C~O z`T$V=GY-AiR0Xpr1Bc2dH~uixm{#BE`lR=E=R8j{I=P%i7S*hTiqqF4#bJ+pR`jMlUpsDV(e~_kA;VV@ zhR5sYjzzYW@Hfm2rqowepC*(iE~H&Av9mS}v z?kOs;n>1Y)|2IF~Q0SjmxTw{`r>m>=`R7UJ3?uvp#9K6D+rT&DYgQ#$9f0ZgP#S*Bel@vKW5Z7JZy^aEFFmJZ^o2i9%7rH*n|K z2SA6PgBu=dC2IO}lLzpW4*_GAPUF)uxS<_GbAhP!It)KfldkLvRphiXnvF=fK)f&# z($0Cd?TKm&X=5-%hXJo%I^h{CsP-7^z&w!v0 zpp|`jlTdfPF}QfOyUzJOM9VN!Gt#pO{0tfGZ)eSccCwIDrru})hjzc#{}@bF#Bo{~ z7oPeyx}-|~)0=#$&q0-Xf7z9b2Ixx|T&G))g10+MJ*BpfMHi-iFp5o}Cv2_a#5G)v zWpZ6&3i~=^vHs|lKz>n0O?j>6uBM-nkeeu5vDKOZEFgLDU6Wf|cG&{!gWu4LP4N_t zVGV8NfjCn$Z%?Je;da!r5O=yH|Jdk|-c+&l{}ZMNS@%A(D%?h6GN@%zIU@qHt#u|> zHmE?(IXracyZzTeoYbc2)`MLve?gzI2aRmJWAL9#)inEv*&fj_%GvqA>0Z0>Z%4K9 zN5d^k!?#l)H#U-P1-Na?f8yzz7e&rF@%*CMC&lbM_nrKD~vDmG0tyHs2dGON|TShlsRGJBbW7TD8P+1!ImR zI0w^;<5N)9qP$g#`P}qSgqr&6_R{^Gv}k{8j}+ec8(D^33ikQsK?lsC&N4Ig&1HC7 z;7^BqHS=2deQq4%_wF-;>IHl9&2ZR!J*Bz%g-p7##nge|dMAWFB6z~hgces!Q2y`D z7z{mY(7qpj&KjJWq#BQou9R9KQ#28L&PQ>I_Wc9E49@Y4hJyi>5GciVc0LEwS~%rs zw-$Y_PZ$;RSpFz+r)Wn*vW7;b8`Nce55_+Wd@((qouud&pV`*zp%f@mP=9#;01x4s zm&ZOfu<>5MKBwUe&#|_w#wE6nLnA{9j&kP(xCHbSV^;Vr_Qq+To=uxE3}9m?`qn~x z2i9c7meaa7JBly`M?;TZ2lJyX&LcS|V?0I&*;*;+Y<|dJ1ta*);=dBwc#~U}Z5vB~ z2ndV@RxR@P{VT2bxBEUX!|x2Xw*-xSYXqc;qCifm>3(gj90qbc-u$t7wpApbKZ6o``s6O@F=pHE2 z^^F|bTv@zVLQnN&zVRJ+rBW&?;-iU#pEQk`HMymIw#|VF1So&J4SG-PZ)2!k{@p(X z=eLbsANX2Y13Js{?f(FYm9IQzyo&q8ULS_`Jt_-h9Mc`l&Tv!P12yaa0I-&`We4rG z@PJPvqs{R{!|06Mh9^?BAFUX8%iBez)qY_wWc5!|`iF?-B|CxnMc6pPg`?9$ zZ%>LAY~UWL{hfcB@-x-Vl-6EMYjg2KQSqg|tEj9Nw%=u830X&F{{U!xdj2(m9IZUk z&3gAY5d|Z8C)WViM!p%+EG=H)Em}`MBg`kKCyL(Em3XFj8XvJ6hGO!mLg(9$@T*=JR#`l@P!FYJc%N8;>&4oShbM`n+L6y_ zTw#F8HFLu{gfO}Wl$ipkU`>93gOZjGGF?z9Cp)84UP7t&v6ML#<=!H&)9uMADvo@oi>qm^)Wwbe+a{OrCRu( zPJk;}4=zlPzP*Y3FQr_FhGi62>IK!kS5ucQ1!ZFAy30A4|;N9LBY%tU_+4)yb9 zCW7$EB#oHK!ztq{ODL+{DwLkBXmBUViHZC9{{RH<@Xv#O3jWKQE}^Jgp(9LLCv|Rw zrg^WKzihwTW5j+q@tx;@J``$C6_c=r=ISX9U)KazXa4{MvG~v6e~TZp#gD_^6Wkk{ 
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/js/custom.js b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/js/custom.js
new file mode 100644
index 0000000000..44a4057dc2
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/_static/js/custom.js
@@ -0,0 +1 @@
+var collapsedSections = ['Model zoo'];
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/changelog.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/changelog.md
new file mode 100644
index 0000000000..c600b16173
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/changelog.md
@@ -0,0 +1,403 @@
+# Changelog
+
+## v0.17.0(29/10/2021)
+
+### Highlights
+
+- Support Tokens-to-Token ViT backbone and Res2Net backbone. Welcome to use!
+- Support ImageNet21k dataset.
+- Add a pipeline visualization tool. Try it with the [tutorials](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#pipeline-visualization)!
+
+### New Features
+
+- Add Tokens-to-Token ViT backbone and converted checkpoints. ([#467](https://github.com/open-mmlab/mmclassification/pull/467))
+- Add Res2Net backbone and converted weights. ([#465](https://github.com/open-mmlab/mmclassification/pull/465))
+- Support ImageNet21k dataset. ([#461](https://github.com/open-mmlab/mmclassification/pull/461))
+- Support seesaw loss. ([#500](https://github.com/open-mmlab/mmclassification/pull/500))
+- Add a pipeline visualization tool. ([#406](https://github.com/open-mmlab/mmclassification/pull/406))
+- Add a tool to find broken files. ([#482](https://github.com/open-mmlab/mmclassification/pull/482))
+- Add a tool to test TorchServe. ([#468](https://github.com/open-mmlab/mmclassification/pull/468))
+
+### Improvements
+
+- Refactor Vision Transformer. ([#395](https://github.com/open-mmlab/mmclassification/pull/395))
+- Use context manager to reuse matplotlib figures. ([#432](https://github.com/open-mmlab/mmclassification/pull/432))
+
+### Bug Fixes
+
+- Remove `DistSamplerSeedHook` when using `IterBasedRunner`. ([#501](https://github.com/open-mmlab/mmclassification/pull/501))
+- Set the priority of `EvalHook` to "LOW" to avoid a bug when using `IterBasedRunner`. ([#488](https://github.com/open-mmlab/mmclassification/pull/488))
+- Fix a wrong parameter of `get_root_logger` in `apis/train.py`. ([#486](https://github.com/open-mmlab/mmclassification/pull/486))
+- Fix version check in dataset builder. ([#474](https://github.com/open-mmlab/mmclassification/pull/474))
+
+### Docs Update
+
+- Add English Colab tutorials and update Chinese Colab tutorials. ([#483](https://github.com/open-mmlab/mmclassification/pull/483), [#497](https://github.com/open-mmlab/mmclassification/pull/497))
+- Add a tutorial for config files. ([#487](https://github.com/open-mmlab/mmclassification/pull/487))
+- Add model-pages in Model Zoo. ([#480](https://github.com/open-mmlab/mmclassification/pull/480))
+- Add code-spell pre-commit hook and fix a large amount of typos.
([#470](https://github.com/open-mmlab/mmclassification/pull/470)) + +## v0.16.0(30/9/2021) + +### Highlights + +- We have improved compatibility with downstream repositories like MMDetection and MMSegmentation. We will add some examples about how to use our backbones in MMDetection. +- Add RepVGG backbone and checkpoints. Welcome to use it! +- Add timm backbones wrapper, now you can simply use backbones of pytorch-image-models in MMClassification! + +### New Features + +- Add RepVGG backbone and checkpoints. ([#414](https://github.com/open-mmlab/mmclassification/pull/414)) +- Add timm backbones wrapper. ([#427](https://github.com/open-mmlab/mmclassification/pull/427)) + +### Improvements + +- Fix TnT compatibility and verbose warning. ([#436](https://github.com/open-mmlab/mmclassification/pull/436)) +- Support setting `--out-items` in `tools/test.py`. ([#437](https://github.com/open-mmlab/mmclassification/pull/437)) +- Add datetime info and saving model using torch<1.6 format. ([#439](https://github.com/open-mmlab/mmclassification/pull/439)) +- Improve downstream repositories compatibility. ([#421](https://github.com/open-mmlab/mmclassification/pull/421)) +- Rename the option `--options` to `--cfg-options` in some tools. ([#425](https://github.com/open-mmlab/mmclassification/pull/425)) +- Add PyTorch 1.9 and Python 3.9 build workflow, and remove some CI. ([#422](https://github.com/open-mmlab/mmclassification/pull/422)) + +### Bug Fixes + +- Fix format error in `test.py` when metric returns `np.ndarray`. ([#441](https://github.com/open-mmlab/mmclassification/pull/441)) +- Fix `publish_model` bug if no parent of `out_file`. ([#463](https://github.com/open-mmlab/mmclassification/pull/463)) +- Fix num_classes bug in pytorch2onnx.py. ([#458](https://github.com/open-mmlab/mmclassification/pull/458)) +- Fix missing runtime requirement `packaging`. ([#459](https://github.com/open-mmlab/mmclassification/pull/459)) +- Fix saving simplified model bug in ONNX export tool. ([#438](https://github.com/open-mmlab/mmclassification/pull/438)) + +### Docs Update + +- Update `getting_started.md` and `install.md`. And rewrite `finetune.md`. ([#466](https://github.com/open-mmlab/mmclassification/pull/466)) +- Use PyTorch style docs theme. ([#457](https://github.com/open-mmlab/mmclassification/pull/457)) +- Update metafile and Readme. ([#435](https://github.com/open-mmlab/mmclassification/pull/435)) +- Add `CITATION.cff`. ([#428](https://github.com/open-mmlab/mmclassification/pull/428)) + +## v0.15.0(31/8/2021) + +### Highlights +- Support `hparams` argument in `AutoAugment` and `RandAugment` to provide hyperparameters for sub-policies. +- Support custom squeeze channels in `SELayer`. +- Support classwise weight in losses. + +### New Features + +- Add `hparams` argument in `AutoAugment` and `RandAugment` and some other improvement. ([#398](https://github.com/open-mmlab/mmclassification/pull/398)) +- Support classwise weight in losses. ([#388](https://github.com/open-mmlab/mmclassification/pull/388)) +- Enhance `SELayer` to support custom squeeze channels. ([#417](https://github.com/open-mmlab/mmclassification/pull/417)) + +### Code Refactor + +- Better result visualization. ([#419](https://github.com/open-mmlab/mmclassification/pull/419)) +- Use `post_process` function to handle pred result processing. ([#390](https://github.com/open-mmlab/mmclassification/pull/390)) +- Update `digit_version` function. 
([#402](https://github.com/open-mmlab/mmclassification/pull/402))
+- Avoid albumentations installing both opencv and opencv-headless. ([#397](https://github.com/open-mmlab/mmclassification/pull/397))
+- Avoid unnecessary listdir when building ImageNet. ([#396](https://github.com/open-mmlab/mmclassification/pull/396))
+- Use dynamic mmcv download link in TorchServe dockerfile. ([#387](https://github.com/open-mmlab/mmclassification/pull/387))
+
+### Docs Improvement
+
+- Add readme of some algorithms and update meta yml. ([#418](https://github.com/open-mmlab/mmclassification/pull/418))
+- Add Copyright information. ([#413](https://github.com/open-mmlab/mmclassification/pull/413))
+- Fix typo 'metirc'. ([#411](https://github.com/open-mmlab/mmclassification/pull/411))
+- Update QQ group QR code. ([#393](https://github.com/open-mmlab/mmclassification/pull/393))
+- Add PR template and modify issue template. ([#380](https://github.com/open-mmlab/mmclassification/pull/380))
+
+## v0.14.0(4/8/2021)
+
+### Highlights
+- Add transformer-in-transformer backbone and pretrained checkpoints, referring to [the paper](https://arxiv.org/abs/2103.00112).
+- Add Chinese colab tutorial.
+- Provide dockerfile to build mmcls dev docker image.
+
+### New Features
+
+- Add transformer in transformer backbone and pretrained checkpoints. ([#339](https://github.com/open-mmlab/mmclassification/pull/339))
+- Support mim, welcome to use mim to manage your mmcls project. ([#376](https://github.com/open-mmlab/mmclassification/pull/376))
+- Add Dockerfile. ([#365](https://github.com/open-mmlab/mmclassification/pull/365))
+- Add ResNeSt configs. ([#332](https://github.com/open-mmlab/mmclassification/pull/332))
+
+### Improvements
+
+- Use the `persistent_workers` option if available, to accelerate training. ([#349](https://github.com/open-mmlab/mmclassification/pull/349))
+- Add Chinese ipynb tutorial. ([#306](https://github.com/open-mmlab/mmclassification/pull/306))
+- Refactor unit tests. ([#321](https://github.com/open-mmlab/mmclassification/pull/321))
+- Support testing mmdet inference with an mmcls backbone. ([#343](https://github.com/open-mmlab/mmclassification/pull/343))
+- Use zero as default value of `thrs` in metrics. ([#341](https://github.com/open-mmlab/mmclassification/pull/341))
+
+### Bug Fixes
+
+- Fix ImageNet dataset annotation file parse bug. ([#370](https://github.com/open-mmlab/mmclassification/pull/370))
+- Fix docstring typo and init bug in ShuffleNetV1. ([#374](https://github.com/open-mmlab/mmclassification/pull/374))
+- Use local ATTENTION registry to avoid conflict with other repositories. ([#375](https://github.com/open-mmlab/mmclassification/pull/375))
+- Fix swin transformer config bug. ([#355](https://github.com/open-mmlab/mmclassification/pull/355))
+- Fix `patch_cfg` argument bug in SwinTransformer. ([#368](https://github.com/open-mmlab/mmclassification/pull/368))
+- Fix duplicate `init_weights` call in ViT init function. ([#373](https://github.com/open-mmlab/mmclassification/pull/373))
+- Fix broken `_base_` link in a resnet config. ([#361](https://github.com/open-mmlab/mmclassification/pull/361))
+- Fix missing vgg-19 model link. ([#363](https://github.com/open-mmlab/mmclassification/pull/363))
+
+## v0.13.0(3/7/2021)
+
+- Support Swin-Transformer backbone and add training configs for Swin-Transformer on ImageNet.
+
+### New Features
+
+- Support Swin-Transformer backbone and add training configs for Swin-Transformer on ImageNet. (#271)
+- Add pretrained model of RegNetX.
(#269)
+- Support adding custom hooks in config file. (#305)
+- Improve and add Chinese translation of `CONTRIBUTING.md` and all tools tutorials. (#320)
+- Dump config before training. (#282)
+- Add torchscript and torchserve deployment tools. (#279, #284)
+
+### Improvements
+
+- Improve test tools and add some new tools. (#322)
+- Correct MobilenetV3 backbone structure and add pretrained models. (#291)
+- Refactor `PatchEmbed` and `HybridEmbed` as independent components. (#330)
+- Refactor mixup and cutmix as `Augments` to support more functions. (#278)
+- Refactor weights initialization method. (#270, #318, #319)
+- Refactor `LabelSmoothLoss` to support multiple calculation formulas. (#285)
+
+### Bug Fixes
+
+- Fix bug for CPU training. (#286)
+- Fix missing test data when `num_imgs` can not be evenly divided by `num_gpus`. (#299)
+- Fix build compatibility with pytorch v1.3-1.5. (#301)
+- Fix `magnitude_std` bug in `RandAugment`. (#309)
+- Fix bug when `samples_per_gpu` is 1. (#311)
+
+## v0.12.0(3/6/2021)
+
+- Finish adding Chinese tutorials and build Chinese documentation on readthedocs.
+- Update ResNeXt checkpoints and ResNet checkpoints on CIFAR.
+
+### New Features
+
+- Improve and add Chinese translation of `data_pipeline.md` and `new_modules.md`. (#265)
+- Build Chinese translation on readthedocs. (#267)
+- Add an argument `efficientnet_style` to `RandomResizedCrop` and `CenterCrop`. (#268)
+
+### Improvements
+
+- Only allow directory operation when rank==0 when testing. (#258)
+- Fix typo in `base_head`. (#274)
+- Update ResNeXt checkpoints. (#283)
+
+### Bug Fixes
+
+- Add attribute `data.test` in MNIST configs. (#264)
+- Download CIFAR/MNIST dataset only on rank 0. (#273)
+- Fix MMCV version compatibility. (#276)
+- Fix CIFAR color channels bug and update checkpoints in model zoo. (#280)
+
+## v0.11.1(21/5/2021)
+
+- Refine `new_dataset.md` and add Chinese translation of `finetune.md`, `new_dataset.md`.
+
+### New Features
+
+- Add `dim` argument for `GlobalAveragePooling`. (#236)
+- Add random noise to `RandAugment` magnitude. (#240)
+- Refine `new_dataset.md` and add Chinese translation of `finetune.md`, `new_dataset.md`. (#243)
+
+### Improvements
+
+- Refactor arguments passing for Heads. (#239)
+- Allow more flexible `magnitude_range` in `RandAugment`. (#249)
+- Inherit MMCV registry so that in the future OpenMMLab repos like MMDet and MMSeg could directly use the backbones supported in MMCls. (#252)
+
+### Bug Fixes
+
+- Fix typo in `analyze_results.py`. (#237)
+- Fix typo in unittests. (#238)
+- Check if specified tmpdir exists when testing to avoid deleting existing data. (#242 & #258)
+- Add missing config files in `MANIFEST.in`. (#250 & #255)
+- Use temporary directory under shared directory to collect results to avoid unavailability of temporary directory for multi-node testing. (#251)
+
+## v0.11.0(1/5/2021)
+
+- Support cutmix trick.
+- Support random augmentation.
+- Add `tools/deployment/test.py` as an ONNX runtime test tool.
+- Support ViT backbone and add training configs for ViT on ImageNet.
+- Add Chinese `README.md` and some Chinese tutorials.
+
+### New Features
+
+- Support cutmix trick. (#198)
+- Add `simplify` option in `pytorch2onnx.py`. (#200)
+- Support random augmentation. (#201)
+- Add config and checkpoint for training ResNet on CIFAR-100. (#208)
+- Add `tools/deployment/test.py` as an ONNX runtime test tool. (#212)
+- Support ViT backbone and add training configs for ViT on ImageNet. (#214)
+- Add finetuning configs for ViT on ImageNet.
(#217)
+- Add `device` option to support training on CPU. (#219)
+- Add Chinese `README.md` and some Chinese tutorials. (#221)
+- Add `metafile.yml` in configs to support interaction with Papers With Code (PWC) and MMCLI. (#225)
+- Upload configs and converted checkpoints for ViT finetuning on ImageNet. (#230)
+
+### Improvements
+
+- Fix `LabelSmoothLoss` so that label smoothing and mixup could be enabled at the same time. (#203)
+- Add `cal_acc` option in `ClsHead`. (#206)
+- Check `CLASSES` in checkpoint to avoid unexpected key error. (#207)
+- Check mmcv version when importing mmcls to ensure compatibility. (#209)
+- Update `CONTRIBUTING.md` to align with that in MMCV. (#210)
+- Change tags to html comments in configs README.md. (#226)
+- Clean codes in ViT backbone. (#227)
+- Reformat `pytorch2onnx.md` tutorial. (#229)
+- Update `setup.py` to support MMCLI. (#232)
+
+### Bug Fixes
+
+- Fix missing `cutmix_prob` in ViT configs. (#220)
+- Fix backend for resize in ResNeXt configs. (#222)
+
+## v0.10.0(1/4/2021)
+
+- Support AutoAugmentation
+- Add tutorials for installation and usage.
+
+### New Features
+
+- Add `Rotate` pipeline for data augmentation. (#167)
+- Add `Invert` pipeline for data augmentation. (#168)
+- Add `Color` pipeline for data augmentation. (#171)
+- Add `Solarize` and `Posterize` pipeline for data augmentation. (#172)
+- Support fp16 training. (#178)
+- Add tutorials for installation and basic usage of MMClassification. (#176)
+- Support `AutoAugmentation`, `AutoContrast`, `Equalize`, `Contrast`, `Brightness` and `Sharpness` pipelines for data augmentation. (#179)
+
+### Improvements
+
+- Support dynamic shape export to onnx. (#175)
+- Release training configs and update model zoo for fp16. (#184)
+- Use MMCV's EvalHook in MMClassification. (#182)
+
+### Bug Fixes
+
+- Fix wrong naming in vgg config. (#181)
+
+## v0.9.0(1/3/2021)
+
+- Implement mixup trick.
+- Add a new tool to create TensorRT engine from ONNX, run inference and verify outputs in Python.
+
+### New Features
+
+- Implement mixup and provide configs of training ResNet50 using mixup. (#160)
+- Add `Shear` pipeline for data augmentation. (#163)
+- Add `Translate` pipeline for data augmentation. (#165)
+- Add `tools/onnx2tensorrt.py` as a tool to create TensorRT engine from ONNX, run inference and verify outputs in Python. (#153)
+
+### Improvements
+
+- Add `--eval-options` in `tools/test.py` to support eval options override, matching the behavior of other open-mmlab projects. (#158)
+- Support showing and saving painted results in `mmcls.apis.test` and `tools/test.py`, matching the behavior of other open-mmlab projects. (#162)
+
+### Bug Fixes
+
+- Fix configs for VGG, replace checkpoints converted from other repos with the ones trained by ourselves and upload the missing logs in the model zoo. (#161)
+
+## v0.8.0(31/1/2021)
+
+- Support multi-label task.
+- Support more flexible metrics settings.
+- Fix bugs.
+
+### New Features
+
+- Add evaluation metrics: mAP, CP, CR, CF1, OP, OR, OF1 for multi-label task. (#123)
+- Add BCE loss for multi-label task. (#130)
+- Add focal loss for multi-label task. (#131)
+- Support PASCAL VOC 2007 dataset for multi-label task. (#134)
+- Add asymmetric loss for multi-label task. (#132)
+- Add analyze_results.py to select images for success/fail demonstration. (#142)
+- Support new metric that calculates the total number of occurrences of each label. (#143)
+- Support class-wise evaluation results. (#143)
+- Add thresholds in eval_metrics.
(#146)
+- Add heads and a baseline config for multilabel task. (#145)
+
+### Improvements
+
+- Remove the models with 0 checkpoint and ignore the repeated papers when counting papers to gain more accurate model statistics. (#135)
+- Add tags in README.md. (#137)
+- Fix optional issues in docstring. (#138)
+- Update stat.py to classify papers. (#139)
+- Fix mismatched columns in README.md. (#150)
+- Fix test.py to support more evaluation metrics. (#155)
+
+### Bug Fixes
+
+- Fix bug in VGG weight_init. (#140)
+- Fix bug in 2 ResNet configs in which outdated heads were used. (#147)
+- Fix bug of misordered height and width in `RandomCrop` and `RandomResizedCrop`. (#151)
+- Fix missing `meta_keys` in `Collect`. (#149 & #152)
+
+## v0.7.0(31/12/2020)
+
+- Add more evaluation metrics.
+- Fix bugs.
+
+### New Features
+
+- Remove installation of MMCV from requirements. (#90)
+- Add 3 evaluation metrics: precision, recall and F-1 score. (#93)
+- Allow config override during testing and inference with `--options`. (#91 & #96)
+
+### Improvements
+
+- Use `build_runner` to make runners more flexible. (#54)
+- Support to get category ids in `BaseDataset`. (#72)
+- Allow `CLASSES` override during `BaseDataset` initialization. (#85)
+- Allow input image as ndarray during inference. (#87)
+- Optimize MNIST config. (#98)
+- Add config links in model zoo documentation. (#99)
+- Use functions from MMCV to collect environment. (#103)
+- Refactor config files so that they are now categorized by methods. (#116)
+- Add README in config directory. (#117)
+- Add model statistics. (#119)
+- Refactor documentation in consistency with other MM repositories. (#126)
+
+### Bug Fixes
+
+- Add missing `CLASSES` argument to dataset wrappers. (#66)
+- Fix slurm evaluation error during training. (#69)
+- Resolve error caused by shape in `Accuracy`. (#104)
+- Fix bug caused by extremely insufficient data in distributed sampler. (#108)
+- Fix bug in `gpu_ids` in distributed training. (#107)
+- Fix bug caused by extremely insufficient data in collect results during testing. (#114)
+
+## v0.6.0(11/10/2020)
+
+- Support new methods: ResNeSt and VGG.
+- Support new dataset: CIFAR10.
+- Provide new tools to do model inference, model conversion from pytorch to onnx.
+
+### New Features
+
+- Add model inference. (#16)
+- Add pytorch2onnx. (#20)
+- Add PIL backend for transform `Resize`. (#21)
+- Add ResNeSt. (#25)
+- Add VGG and its pretrained models. (#27)
+- Add CIFAR10 configs and models. (#38)
+- Add albumentations transforms. (#45)
+- Visualize results on image demo. (#58)
+
+### Improvements
+
+- Replace urlretrieve with urlopen in dataset.utils. (#13)
+- Resize image according to its short edge. (#22)
+- Update ShuffleNet config. (#31)
+- Update pre-trained models for shufflenet_v2, shufflenet_v1, se-resnet50, se-resnet101. (#33)
+
+### Bug Fixes
+
+- Fix init_weights in `shufflenet_v2.py`. (#29)
+- Fix the parameter `size` in test_pipeline. (#30)
+- Fix the parameter in cosine lr schedule. (#32)
+- Fix the convert tools for mobilenet_v2. (#34)
+- Fix crash in CenterCrop transform when image is greyscale. (#40)
+- Fix outdated configs.
(#53)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/community/CONTRIBUTING.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/community/CONTRIBUTING.md
new file mode 100644
index 0000000000..1044a050f3
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/community/CONTRIBUTING.md
@@ -0,0 +1,71 @@
+# Contributing to OpenMMLab
+
+All kinds of contributions are welcome, including but not limited to the following.
+
+- Fixes (typos, bugs)
+- New features and components
+
+## Workflow
+
+1. fork and pull the latest OpenMMLab repository (mmclassification)
+2. checkout a new branch (do not use master branch for PRs)
+3. commit your changes
+4. create a PR
+
+Note: If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first.
+
+## Code style
+
+### Python
+
+We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
+
+We use the following tools for linting and formatting:
+
+- [flake8](http://flake8.pycqa.org/en/latest/): A wrapper around some linter tools.
+- [yapf](https://github.com/google/yapf): A formatter for Python files.
+- [isort](https://github.com/timothycrosley/isort): A Python utility to sort imports.
+- [markdownlint](https://github.com/markdownlint/markdownlint): A linter to check markdown files and flag style issues.
+- [docformatter](https://github.com/myint/docformatter): A formatter to format docstrings.
+
+Style configurations of yapf and isort can be found in [setup.cfg](https://github.com/open-mmlab/mmclassification/blob/master/setup.cfg).
+
+We use a [pre-commit hook](https://pre-commit.com/) that checks and formats `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`,
+fixes `end-of-files`, `double-quoted-strings`, `python-encoding-pragma`, `mixed-line-ending`, and sorts `requirements.txt` automatically on every commit.
+The config for a pre-commit hook is stored in [.pre-commit-config](https://github.com/open-mmlab/mmclassification/blob/master/.pre-commit-config.yaml).
+
+After you clone the repository, you will need to install and initialize the pre-commit hook.
+
+```shell
+pip install -U pre-commit
+```
+
+From the repository folder, run
+
+```shell
+pre-commit install
+```
+
+If you encounter an issue installing markdownlint, try the following steps to install ruby.
+
+```shell
+# install rvm
+curl -L https://get.rvm.io | bash -s -- --autolibs=read-fail
+[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
+rvm autolibs disable
+
+# install ruby
+rvm install 2.7.1
+```
+
+Or refer to [this repo](https://github.com/innerlee/setup) and use [`zzruby.sh`](https://github.com/innerlee/setup/blob/master/zzruby.sh) according to its instructions.
+
+After this, the code linters and formatter will be enforced on every commit.
+
+```{important}
+Before you create a PR, make sure that your code lints and is formatted by yapf.
+```
+
+### C++ and CUDA
+
+We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/conf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/conf.py
new file mode 100644
index 0000000000..28d2310e14
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/conf.py
@@ -0,0 +1,297 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options.
For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme +from m2r import MdInclude +from recommonmark.transform import AutoStructify +from sphinx.builders.html import StandaloneHTMLBuilder + +sys.path.insert(0, os.path.abspath('..')) + +# -- Project information ----------------------------------------------------- + +project = 'MMClassification' +copyright = '2020, OpenMMLab' +author = 'MMClassification Authors' +version_file = '../mmcls/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +# The full version, including alpha/beta/rc tags +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'sphinx_markdown_tables', + 'myst_parser', + 'sphinx_copybutton', +] + +autodoc_mock_imports = ['matplotlib', 'mmcls.version', 'mmcv.ops'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. 
+# +html_theme_options = { + # 'logo_url': 'https://mmocr.readthedocs.io/en/latest/', + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmclassification' + }, + { + 'name': + 'Colab Tutorials', + 'children': [ + { + 'name': + 'Train and inference with shell commands', + 'url': + 'https://colab.research.google.com/github/' + 'open-mmlab/mmclassification/blob/master/docs/tutorials/' + 'MMClassification_tools.ipynb', + }, + { + 'name': + 'Train and inference with Python APIs', + 'url': + 'https://colab.research.google.com/github/' + 'open-mmlab/mmclassification/blob/master/docs/tutorials/' + 'MMClassification_python.ipynb', + }, + ] + }, + { + 'name': + 'Projects', + 'children': [ + { + 'name': 'MMAction2', + 'url': 'https://github.com/open-mmlab/mmaction2', + }, + { + 'name': 'MMClassification', + 'url': 'https://github.com/open-mmlab/mmclassification', + }, + { + 'name': 'MMDetection', + 'url': 'https://github.com/open-mmlab/mmdetection', + }, + { + 'name': 'MMDetection3D', + 'url': 'https://github.com/open-mmlab/mmdetection3d', + }, + { + 'name': 'MMEditing', + 'url': 'https://github.com/open-mmlab/mmediting', + }, + { + 'name': 'MMGeneration', + 'url': 'https://github.com/open-mmlab/mmgeneration', + }, + { + 'name': 'MMOCR', + 'url': 'https://github.com/open-mmlab/mmocr', + }, + { + 'name': 'MMPose', + 'url': 'https://github.com/open-mmlab/mmpose', + }, + { + 'name': 'MMSegmentation', + 'url': 'https://github.com/open-mmlab/mmsegmentation', + }, + { + 'name': 'MMTracking', + 'url': 'https://github.com/open-mmlab/mmtracking', + }, + ] + }, + { + 'name': + 'OpenMMLab', + 'children': [ + { + 'name': 'Homepage', + 'url': 'https://openmmlab.com/' + }, + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/' + }, + { + 'name': 'Twitter', + 'url': 'https://twitter.com/OpenMMLab' + }, + { + 'name': 'Zhihu', + 'url': 'https://zhihu.com/people/openmmlab' + }, + ] + }, + ] +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] +html_js_files = ['js/custom.js'] + +master_doc = 'index' + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'mmclsdoc' + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + 'preamble': + r''' +\hypersetup{unicode=true} +\usepackage{CJKutf8} +\DeclareUnicodeCharacter{00A0}{\nobreakspace} +\DeclareUnicodeCharacter{2203}{\ensuremath{\exists}} +\DeclareUnicodeCharacter{2200}{\ensuremath{\forall}} +\DeclareUnicodeCharacter{2286}{\ensuremath{\subseteq}} +\DeclareUnicodeCharacter{2713}{x} +\DeclareUnicodeCharacter{27FA}{\ensuremath{\Longleftrightarrow}} +\DeclareUnicodeCharacter{221A}{\ensuremath{\sqrt{}}} +\DeclareUnicodeCharacter{221B}{\ensuremath{\sqrt[3]{}}} +\DeclareUnicodeCharacter{2295}{\ensuremath{\oplus}} +\DeclareUnicodeCharacter{2297}{\ensuremath{\otimes}} +\begin{CJK}{UTF8}{gbsn} +\AtEndDocument{\end{CJK}} +''', +} + +# Grouping the document tree into LaTeX files. 
List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'mmcls.tex', 'MMClassification Documentation', + 'MMClassification Contributors', 'manual'), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, 'mmcls', 'MMClassification Documentation', [author], + 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'mmcls', 'MMClassification Documentation', author, 'mmcls', + 'One line description of project.', 'Miscellaneous'), +] + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# set priority when building html +StandaloneHTMLBuilder.supported_image_types = [ + 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg' +] + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + + +def builder_inited_handler(app): + subprocess.run(['./stat.py']) + + +def setup(app): + app.add_config_value('no_underscore_emphasis', False, 'env') + app.add_config_value('m2r_parse_relative_links', False, 'env') + app.add_config_value('m2r_anonymous_references', False, 'env') + app.add_config_value('m2r_disable_inline_math', False, 'env') + app.add_directive('mdinclude', MdInclude) + app.add_config_value('recommonmark_config', { + 'auto_toc_tree_section': 'Contents', + 'enable_eval_rst': True, + }, True) + app.add_transform(AutoStructify) + app.add_js_file('./_static/js/custom.js') + app.connect('builder-inited', builder_inited_handler) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/getting_started.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/getting_started.md new file mode 100644 index 0000000000..a416b4d3f3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/getting_started.md @@ -0,0 +1,232 @@ +# Getting Started + +This page provides basic tutorials about the usage of MMClassification. + +## Prepare datasets + +It is recommended to symlink the dataset root to `$MMCLASSIFICATION/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +``` +mmclassification +├── mmcls +├── tools +├── configs +├── docs +├── data +│ ├── imagenet +│ │ ├── meta +│ │ ├── train +│ │ ├── val +│ ├── cifar +│ │ ├── cifar-10-batches-py +│ ├── mnist +│ │ ├── train-images-idx3-ubyte +│ │ ├── train-labels-idx1-ubyte +│ │ ├── t10k-images-idx3-ubyte +│ │ ├── t10k-labels-idx1-ubyte + +``` + +For ImageNet, it has multiple versions, but the most commonly used one is [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/). It can be accessed with the following steps. + +1. 
Register an account and login to the [download page](http://www.image-net.org/download-images).
+2. Find download links for ILSVRC2012 and download the following two files
+   - ILSVRC2012_img_train.tar (~138GB)
+   - ILSVRC2012_img_val.tar (~6.3GB)
+3. Untar the downloaded files
+4. Download meta data using this [script](https://github.com/BVLC/caffe/blob/master/data/ilsvrc12/get_ilsvrc_aux.sh)
+
+For MNIST, CIFAR10 and CIFAR100, the datasets will be downloaded and unzipped automatically if they are not found.
+
+For using custom datasets, please refer to [Tutorials 2: Adding New Dataset](tutorials/new_dataset.md).
+
+## Inference with pretrained models
+
+We provide scripts to run inference on a single image, run inference on a dataset, and test a dataset (e.g., ImageNet).
+
+### Inference a single image
+
+```shell
+python demo/image_demo.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE}
+
+# Example
+python demo/image_demo.py demo/demo.JPEG configs/resnet/resnet50_b32x8_imagenet.py \
+    https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth
+```
+
+### Inference and test a dataset
+
+- single GPU
+- single node multiple GPUs
+- multiple nodes
+
+You can use the following commands to infer a dataset.
+
+```shell
+# single-gpu
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}]
+
+# multi-gpu
+./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--metrics ${METRICS}] [--out ${RESULT_FILE}]
+
+# multi-node in slurm environment
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}] --launcher slurm
+```
+
+Optional arguments:
+
+- `RESULT_FILE`: Filename of the output results. If not specified, the results will not be saved to a file. Supported formats include json, yaml and pickle.
+- `METRICS`: Items to be evaluated on the results, like accuracy, precision, recall, etc.
+
+Examples:
+
+Assume that you have already downloaded the checkpoints to the directory `checkpoints/`.
+Infer ResNet-50 on the CIFAR-10 test set to get predicted labels and their corresponding predicted scores.
+
+```shell
+python tools/test.py configs/resnet/resnet50_b16x8_cifar10.py \
+    https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth \
+    --out result.pkl
+```
+
+## Train a model
+
+MMClassification implements distributed training and non-distributed training,
+which use `MMDistributedDataParallel` and `MMDataParallel` respectively.
+
+All outputs (log files and checkpoints) will be saved to the working directory,
+which is specified by `work_dir` in the config file.
+
+By default, we evaluate the model on the validation set after each epoch; you can change the evaluation interval by adding the `interval` argument in the training config.
+
+```python
+evaluation = dict(interval=12)  # Evaluate the model every 12 epochs.
+```
+
+### Train with a single GPU
+
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+If you want to specify the working directory in the command, you can add an argument `--work-dir ${YOUR_WORK_DIR}`.
+
+### Train with multiple GPUs
+
+```shell
+./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments]
+```
+
+Optional arguments are:
+
+- `--no-validate` (**not suggested**): By default, the codebase will perform evaluation at every k (default value is 1) epochs during the training. To disable this behavior, use `--no-validate`.
+- `--work-dir ${WORK_DIR}`: Override the working directory specified in the config file.
+- `--resume-from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file.
+
+Difference between `resume-from` and `load-from`:
+`resume-from` loads both the model weights and optimizer state, and the epoch is also inherited from the specified checkpoint. It is usually used for resuming a training process that was interrupted accidentally.
+`load-from` only loads the model weights and the training epoch starts from 0. It is usually used for finetuning.
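+
+The same choice can also be made directly in the config file. A minimal sketch (assuming the usual `resume_from`/`load_from` keys from the default runtime config; both checkpoint paths below are placeholders):
+
+```python
+# Pick at most one of the two in your config; both default to None.
+
+# Resume an interrupted run: restores weights, optimizer state and the current epoch.
+resume_from = 'work_dirs/resnet50_b32x8_imagenet/latest.pth'
+
+# Finetune from pretrained weights: loads weights only, training restarts at epoch 0.
+# load_from = 'checkpoints/resnet50_pretrained.pth'
+```
+
+On the command line, `--resume-from ${CHECKPOINT_FILE}` has the same effect as setting `resume_from` in the config.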
+- `--work-dir ${WORK_DIR}`: Override the working directory specified in the config file. +- `--resume-from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file. + +Difference between `resume-from` and `load-from`: +`resume-from` loads both the model weights and optimizer status, and the epoch is also inherited from the specified checkpoint. It is usually used for resuming the training process that is interrupted accidentally. +`load-from` only loads the model weights and the training epoch starts from 0. It is usually used for finetuning. + +### Train with multiple machines + +If you run MMClassification on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`. (This script also supports single machine training.) + +```shell +[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} +``` + +You can check [slurm_train.sh](https://github.com/open-mmlab/mmclassification/blob/master/tools/slurm_train.sh) for full arguments and environment variables. + +If you have just multiple machines connected with ethernet, you can refer to +PyTorch [launch utility](https://pytorch.org/docs/stable/distributed_deprecated.html#launch-utility). +Usually it is slow if you do not have high speed networking like InfiniBand. + +### Launch multiple jobs on a single machine + +If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs, +you need to specify different ports (29500 by default) for each job to avoid communication conflict. + +If you use `dist_train.sh` to launch training jobs, you can set the port in commands. + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 +``` + +If you use launch training jobs with Slurm, you need to modify the config files (usually the 6th line from the bottom in config files) to set different communication ports. + +In `config1.py`, + +```python +dist_params = dict(backend='nccl', port=29500) +``` + +In `config2.py`, + +```python +dist_params = dict(backend='nccl', port=29501) +``` + +Then you can launch two jobs with `config1.py` ang `config2.py`. + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} +CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} +``` + +## Useful tools + +We provide lots of useful tools under `tools/` directory. + +### Get the FLOPs and params (experimental) + +We provide a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) to compute the FLOPs and params of a given model. + +```shell +python tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +You will get the result like this. + +``` +============================== +Input shape: (3, 224, 224) +Flops: 4.12 GFLOPs +Params: 25.56 M +============================== +``` + +```{warning} +This tool is still experimental and we do not guarantee that the number is correct. You may well use the result for simple comparisons, but double check it before you adopt it in technical reports or papers. +- FLOPs are related to the input shape while parameters are not. The default input shape is (1, 3, 224, 224). +- Some operators are not counted into FLOPs like GN and custom operators. 
Refer to [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) for details. +``` + +### Publish a model + +Before you publish a model, you may want to +1. Convert model weights to CPU tensors. +2. Delete the optimizer states. +3. Compute the hash of the checkpoint file and append the hash id to the filename. + +```shell +python tools/convert_models/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} +``` + +E.g., + +```shell +python tools/convert_models/publish_model.py work_dirs/resnet50/latest.pth imagenet_resnet50.pth +``` + +The final output filename will be `imagenet_resnet50_{date}-{hash id}.pth`. + +## Tutorials + +Currently, we provide five tutorials for users. + +- [finetune models](tutorials/finetune.md) +- [add new dataset](tutorials/new_dataset.md) +- [design data pipeline](tutorials/data_pipeline.md) +- [add new modules](tutorials/new_modules.md). diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/install.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/install.md new file mode 100644 index 0000000000..b416107499 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/install.md @@ -0,0 +1,142 @@ +# Installation + +## Requirements + +- Python 3.6+ +- PyTorch 1.5+ +- [MMCV](https://github.com/open-mmlab/mmcv) + +The compatible MMClassification and MMCV versions are as below. Please install the correct version of MMCV to avoid installation issues. + +| MMClassification version | MMCV version | +|:------------------------:|:---------------------:| +| master | mmcv>=1.3.16, <=1.5.0 | +| 0.17.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.16.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.15.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.15.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.14.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.13.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.12.0 | mmcv>=1.3.1, <=1.5.0 | +| 0.11.1 | mmcv>=1.3.1, <=1.5.0 | +| 0.11.0 | mmcv>=1.3.0 | +| 0.10.0 | mmcv>=1.3.0 | +| 0.9.0 | mmcv>=1.1.4 | +| 0.8.0 | mmcv>=1.1.4 | +| 0.7.0 | mmcv>=1.1.4 | +| 0.6.0 | mmcv>=1.1.4 | + +```{note} +Since the `master` branch is under frequent development, the `mmcv` +version dependency may be inaccurate. If you encounter problems when using +the `master` branch, please try to update `mmcv` to the latest version. +``` + +## Install MMClassification + +a. Create a conda virtual environment and activate it. + +```shell +conda create -n open-mmlab python=3.8 -y +conda activate open-mmlab +``` + +b. Install PyTorch and torchvision following the [official instructions](https://pytorch.org/), e.g., + +```shell +conda install pytorch torchvision -c pytorch +``` + +```{note} +Make sure that your compilation CUDA version and runtime CUDA version match. +You can check the supported CUDA version for precompiled packages on the +[PyTorch website](https://pytorch.org/). +``` + +`E.g.1` If you have CUDA 10.1 installed under `/usr/local/cuda` and would like to install +PyTorch 1.5.1, you need to install the prebuilt PyTorch with CUDA 10.1. + +```shell +conda install pytorch==1.5.1 torchvision==0.6.1 cudatoolkit=10.1 -c pytorch +``` + +`E.g.2` If you have CUDA 11.3 installed under `/usr/local/cuda` and would like to install +PyTorch 1.10.0., you need to install the prebuilt PyTorch with CUDA 11.3. + +```shell +conda install pytorch==1.10.0 torchvision==0.11.1 cudatoolkit=11.3 -c pytorch +``` + +If you build PyTorch from source instead of installing the prebuilt package, +you can use more CUDA versions such as 9.0. + +c. 
Install MMClassification repository. + +### Release version + +We recommend you to install MMClassification with [MIM](https://github.com/open-mmlab/mim). + +```shell +pip install git+https://github.com/open-mmlab/mim.git +mim install mmcls +``` + +MIM can automatically install OpenMMLab projects and their requirements, +and it can also help us to train, parameter search and pretrain model download. + +Or, you can install MMClassification with pip: + +```shell +pip install mmcls +``` + +### Develop version + +First, clone the MMClassification repository. + +```shell +git clone https://github.com/open-mmlab/mmclassification.git +cd mmclassification +``` + +And then, install build requirements and install MMClassification. + +```shell +pip install -e . # or "python setup.py develop" +``` + +```{note} +Following above instructions, MMClassification is installed on `dev` mode, +any local modifications made to the code will take effect without the need to +reinstall it (unless you submit some commits and want to update the version +number). +``` + +### Another option: Docker Image + +We provide a [Dockerfile](https://github.com/open-mmlab/mmclassification/blob/master/docker/Dockerfile) to build an image. + +```shell +# build an image with PyTorch 1.6.0, CUDA 10.1, CUDNN 7. +docker build -f ./docker/Dockerfile --rm -t mmcls:torch1.6.0-cuda10.1-cudnn7 . +``` + +```{important} +Make sure you've installed the [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). +``` + +Run a container built from mmcls image with command: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/workspace/mmclassification/data mmcls:torch1.6.0-cuda10.1-cudnn7 /bin/bash +``` + +## Using multiple MMClassification versions + +The train and test scripts already modify the `PYTHONPATH` to ensure the script use the MMClassification in the current directory. + +To use the default MMClassification installed in the environment rather than that you are working with, you can remove the following line in those scripts + +```shell +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH +``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/model_zoo.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/model_zoo.md new file mode 100644 index 0000000000..dc4163569d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/model_zoo.md @@ -0,0 +1,75 @@ +# Model Zoo + +## ImageNet + +ImageNet has multiple versions, but the most commonly used one is [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/). +The ResNet family models below are trained by standard data augmentations, i.e., RandomResizedCrop, RandomHorizontalFlip and Normalize. 
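+
+For reference, this "standard" recipe corresponds roughly to the training pipeline sketched below. This is only an illustrative sketch based on typical `_base_` ImageNet dataset configs; the exact field values may differ between configs, so check the config file linked in each row for the authoritative settings.
+
+```python
+# Illustrative sketch of the standard ImageNet training pipeline used for the
+# ResNet family; values are taken from typical _base_ dataset configs.
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),            # read the raw image
+    dict(type='RandomResizedCrop', size=224),  # random crop, then resize to 224x224
+    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+    dict(type='Normalize', **img_norm_cfg),    # normalize with ImageNet statistics
+    dict(type='ImageToTensor', keys=['img']),
+    dict(type='ToTensor', keys=['gt_label']),
+    dict(type='Collect', keys=['img', 'gt_label']),
+]
+```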
+ +| Model | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:---------:|:---------:|:--------:| +| VGG-11 | 132.86 | 7.63 | 68.75 | 88.87 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.log.json) | +| VGG-13 | 133.05 | 11.34 | 70.02 | 89.46 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.log.json) | +| VGG-16 | 138.36 | 15.5 | 71.62 | 90.49 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.log.json) | +| VGG-19 | 143.67 | 19.67 | 72.41 | 90.80 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.log.json)| +| VGG-11-BN | 132.87 | 7.64 | 70.75 | 90.12 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg11bn_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.log.json) | +| VGG-13-BN | 133.05 | 11.36 | 72.15 | 90.71 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg13bn_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.log.json) | +| VGG-16-BN | 138.37 | 15.53 | 73.72 | 91.68 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg16_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.log.json) | +| VGG-19-BN | 143.68 | 19.7 | 74.70 | 92.24 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vgg/vgg19bn_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.log.json)| +| RepVGG-A0\* | 9.11(train) | 8.31 (deploy) | 1.52 (train) | 1.36 (deploy) | 72.41 | 90.50 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A0_4xb64-coslr-120e_in1k.py) | [config 
(deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-coslr-120e_in1k_20210909-883ab98c.pth) | [log]() | +| RepVGG-A1\* | 14.09 (train) | 12.79 (deploy) | 2.64 (train) | 2.37 (deploy) | 74.47 | 91.85 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A1_4xb64-coslr-120e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-coslr-120e_in1k_20210909-24003a24.pth) | [log]() | +| RepVGG-A2\* | 28.21 (train) | 25.5 (deploy) | 5.7 (train) | 5.12 (deploy) | 76.48 | 93.01 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-A2_4xb64-coslr-120e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-coslr-120e_in1k_20210909-97d7695a.pth) | [log]() | +| RepVGG-B0\* | 15.82 (train) | 14.34 (deploy) | 3.42 (train) | 3.06 (deploy) | 75.14 | 92.42 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B0_4xb64-coslr-120e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-coslr-120e_in1k_20210909-446375f4.pth) | [log]() | +| RepVGG-B1\* | 57.42 (train) | 51.83 (deploy) | 13.16 (train) | 11.82 (deploy) | 78.37 | 94.11 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1_4xb64-coslr-120e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-coslr-120e_in1k_20210909-750cdf67.pth) | [log]() | +| RepVGG-B1g2\* | 45.78 (train) | 41.36 (deploy) | 9.82 (train) | 8.82 (deploy) | 77.79 | 93.88 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g2_4xb64-coslr-120e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-coslr-120e_in1k_20210909-344f6422.pth) | [log]() | +| RepVGG-B1g4\* | 39.97 (train) | 36.13 (deploy) | 8.15 (train) | 7.32 (deploy) | 77.58 | 93.84 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B1g4_4xb64-coslr-120e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-coslr-120e_in1k_20210909-d4c1a642.pth) | [log]() | +| RepVGG-B2\* | 89.02 (train) | 80.32 (deploy) | 20.46 (train) | 18.39 (deploy) | 78.78 | 94.42 | [config 
(train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2_4xb64-coslr-120e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-coslr-120e_in1k_20210909-bd6b937c.pth) | [log]() | +| RepVGG-B2g4\* | 61.76 (train) | 55.78 (deploy) | 12.63 (train) | 11.34 (deploy) | 79.38 | 94.68 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B2g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-7b7955f0.pth) | [log]() | +| RepVGG-B3\* | 123.09 (train) | 110.96 (deploy) | 29.17 (train) | 26.22 (deploy) | 80.52 | 95.26 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-dda968bf.pth) | [log]() | +| RepVGG-B3g4\* | 83.83 (train) | 75.63 (deploy) | 17.9 (train) | 16.08 (deploy) | 80.22 | 95.10 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-B3g4_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-4e54846a.pth) | [log]() | +| RepVGG-D2se\* | 133.33 (train) | 120.39 (deploy) | 36.56 (train) | 32.85 (deploy) | 81.81 | 95.94 | [config (train)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/repvgg-D2se_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/master/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth) | [log]() | +| ResNet-18 | 11.69 | 1.82 | 70.07 | 89.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_batch256_imagenet_20200708-34ab8f90.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_batch256_imagenet_20200708-34ab8f90.log.json) | +| ResNet-34 | 21.8 | 3.68 | 73.85 | 91.53 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_batch256_imagenet_20200708-32ffb4f7.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_batch256_imagenet_20200708-32ffb4f7.log.json) | +| ResNet-50 | 25.56 | 4.12 | 76.55 | 93.15 | 
[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.log.json) | +| ResNet-101 | 44.55 | 7.85 | 78.18 | 94.03 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_batch256_imagenet_20200708-753f3608.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_batch256_imagenet_20200708-753f3608.log.json) | +| ResNet-152 | 60.19 | 11.58 | 78.63 | 94.16 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_batch256_imagenet_20200708-ec25b1f9.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_batch256_imagenet_20200708-ec25b1f9.log.json) | +| Res2Net-50-14w-8s\* | 25.06 | 4.22 | 78.14 | 93.85 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net50-w14-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth) | [log]()| +| Res2Net-50-26w-8s\* | 48.40 | 8.39 | 79.20 | 94.36 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net50-w26-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth) | [log]()| +| Res2Net-101-26w-4s\* | 45.21 | 8.12 | 79.19 | 94.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/res2net/res2net101-w26-s4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth) | [log]()| +| ResNeSt-50\* | 27.48 | 5.41 | 81.13 | 95.59 | | [model](https://download.openmmlab.com/mmclassification/v0/resnest/resnest50_imagenet_converted-1ebf0afe.pth) | [log]() | +| ResNeSt-101\* | 48.28 | 10.27 | 82.32 | 96.24 | | [model](https://download.openmmlab.com/mmclassification/v0/resnest/resnest101_imagenet_converted-032caa52.pth) | [log]() | +| ResNeSt-200\* | 70.2 | 17.53 | 82.41 | 96.22 | | [model](https://download.openmmlab.com/mmclassification/v0/resnest/resnest200_imagenet_converted-581a60f2.pth) | [log]() | +| ResNeSt-269\* | 110.93 | 22.58 | 82.70 | 96.28 | | [model](https://download.openmmlab.com/mmclassification/v0/resnest/resnest269_imagenet_converted-59930960.pth) | [log]() | +| ResNetV1D-50 | 25.58 | 4.36 | 77.54 | 93.57 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d50_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.log.json) | +| ResNetV1D-101 | 44.57 | 8.09 | 78.93 | 94.48 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d101_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth) | 
[log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.log.json) | +| ResNetV1D-152 | 60.21 | 11.82 | 79.41 | 94.7 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnetv1d152_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.log.json) | +| ResNeXt-32x4d-50 | 25.03 | 4.27 | 77.90 | 93.66 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext50_32x4d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.log.json) | +| ResNeXt-32x4d-101 | 44.18 | 8.03 | 78.71 | 94.12 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101_32x4d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.log.json) | +| ResNeXt-32x8d-101 | 88.79 | 16.5 | 79.23 | 94.58 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext101_32x8d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.log.json) | +| ResNeXt-32x4d-152 | 59.95 | 11.8 | 78.93 | 94.41 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnext/resnext152_32x4d_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.log.json) | +| SE-ResNet-50 | 28.09 | 4.13 | 77.74 | 93.84 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet50_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth) | [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200708-657b3c36.log.json) | +| SE-ResNet-101 | 49.33 | 7.86 | 78.26 | 94.07 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/seresnet/seresnet101_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth) | [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200708-038a4d04.log.json) | +| ShuffleNetV1 1.0x (group=3) | 1.87 | 0.146 | 68.13 | 87.81 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth) | [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.log.json) | +| 
ShuffleNetV2 1.0x | 2.28 | 0.149 | 69.55 | 88.92 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth) | [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200804-8860eec9.log.json) | +| MobileNet V2 | 3.5 | 0.319 | 71.86 | 90.42 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth) | [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.log.json) | +| ViT-B/16\* | 86.86 | 33.03 | 85.43 | 97.77 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-evalonly_in-1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_in1k-384_20210819-65c4bf44.pth) | [log]() | +| ViT-B/32\* | 88.3 | 8.56 | 84.01 | 97.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p32_ft-evalonly_in-1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_in1k-384_20210819-a56f8886.pth) | [log]() | +| ViT-L/16\* | 304.72 | 116.68 | 85.63 | 97.63 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-large-p16_ft-evalonly_in-1k-384.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_in1k-384_20210819-0bb8550c.pth) | [log]() | +| Swin-Transformer tiny | 28.29 | 4.36 | 81.18 | 95.61 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_tiny_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925.log.json)| +| Swin-Transformer small| 49.61 | 8.52 | 83.02 | 96.29 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_small_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219.log.json)| +| Swin-Transformer base | 87.77 | 15.14 | 83.36 | 96.44 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742.log.json)| +| Transformer in Transformer small\* | 23.76 | 3.36 | 81.52 | 95.73 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/tnt/tnt_s_patch16_224_evalonly_imagenet.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth) | [log]()| +| T2T-ViT_t-14\* | 21.47 | 4.34 | 81.69 | 95.85 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_3rdparty_8xb64_in1k_20210928-420df0f6.pth) | [log]()| +| T2T-ViT_t-19\* | 39.08 | 7.80 | 82.43 | 96.08 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_3rdparty_8xb64_in1k_20210928-e479c2a6.pth) | [log]()| +| T2T-ViT_t-24\* | 64.00 | 12.69 | 82.55 | 96.06 | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_3rdparty_8xb64_in1k_20210928-b5bf2526.pth) | [log]()| + +Models with * are converted from other repos, others are trained by ourselves. + +## CIFAR10 + +| Model | Params(M) | Flops(G) | Top-1 (%) | Config | Download | +|:---------------------:|:---------:|:--------:|:---------:|:--------:|:--------:| +| ResNet-18-b16x8 | 11.17 | 0.56 | 94.82 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.log.json) | +| ResNet-34-b16x8 | 21.28 | 1.16 | 95.34 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet34_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.log.json) | +| ResNet-50-b16x8 | 23.52 | 1.31 | 95.55 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.log.json) | +| ResNet-101-b16x8 | 42.51 | 2.52 | 95.58 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet101_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.log.json) | +| ResNet-152-b16x8 | 58.16 | 3.74 | 95.76 | | [config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet152_b16x8_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.log.json) | diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/stat.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/stat.py new file mode 100644 index 0000000000..feee2fe0aa --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/stat.py @@ -0,0 +1,107 @@ +#!/usr/bin/env 
python +import functools as func +import glob +import os +import re +from pathlib import Path + +import numpy as np + +MMCLS_ROOT = Path(__file__).absolute().parents[1] +url_prefix = 'https://github.com/open-mmlab/mmclassification/blob/master/' + +papers_root = Path('papers') +papers_root.mkdir(exist_ok=True) +files = [Path(f) for f in sorted(glob.glob('../configs/*/README.md'))] + +stats = [] +titles = [] +num_ckpts = 0 +num_configs = 0 + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # Extract checkpoints + ckpts = set(x.lower().strip() + for x in re.findall(r'\[model\]\((https?.*)\)', content)) + if len(ckpts) == 0: + continue + num_ckpts += len(ckpts) + + # Extract paper title + title = content.split('\n')[0].replace('# ', '').strip() + titles.append(title) + + # Extract paper abbreviation + abbr = [x for x in re.findall(r'', content)] + abbr = abbr[0] if len(abbr) > 0 else title + + # Extract paper type + _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] + assert len(_papertype) > 0 + papertype = _papertype[0] + paper = set([(papertype, title)]) + + # Write a copy of README + copy = papers_root / (f.parent.name + '.md') + if copy.exists(): + os.remove(copy) + + def replace_link(matchobj): + # Replace relative link to GitHub link. + name = matchobj.group(1) + link = matchobj.group(2) + if not link.startswith('http') and (f.parent / link).exists(): + rel_link = (f.parent / link).absolute().relative_to(MMCLS_ROOT) + link = url_prefix + str(rel_link) + return f'[{name}]({link})' + + content = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', replace_link, content) + + with open(copy, 'w') as copy_file: + copy_file.write(content) + + statsmsg = f""" +\t* [{papertype}] [{title}]({copy}) ({len(ckpts)} ckpts) +""" + stats.append( + dict( + paper=paper, ckpts=ckpts, statsmsg=statsmsg, abbr=abbr, copy=copy)) + +allpapers = func.reduce(lambda a, b: a.union(b), + [stat['paper'] for stat in stats]) +msglist = '\n'.join(stat['statsmsg'] for stat in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# Model Zoo Summary + +* Number of papers: {len(set(titles))} +{countstr} + +* Number of checkpoints: {num_ckpts} +{msglist} +""" + +with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) + +toctree = """ +.. toctree:: + :maxdepth: 1 + :caption: Model zoo + :glob: + + modelzoo_statistics.md + model_zoo.md +""" +with open('_model_zoo.rst', 'w') as f: + f.write(toctree) + for stat in stats: + f.write(f' {stat["abbr"]} <{stat["copy"]}>\n') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/model_serving.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/model_serving.md new file mode 100644 index 0000000000..67efe67ec4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/model_serving.md @@ -0,0 +1,87 @@ +# Model Serving + +In order to serve an `MMClassification` model with [`TorchServe`](https://pytorch.org/serve/), you can follow the steps: + +## 1. Convert model from MMClassification to TorchServe + +```shell +python tools/deployment/mmcls2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +```{note} +${MODEL_STORE} needs to be an absolute path to a folder. 
+``` + +Example: + +```shell +python tools/deployment/mmcls2torchserve.py \ + configs/resnet/resnet18_b32x8_imagenet.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + --output-folder ./checkpoints \ + --model-name resnet18_in1k +``` + +## 2. Build `mmcls-serve` docker image + +```shell +docker build -t mmcls-serve:latest docker/serve/ +``` + +## 3. Run `mmcls-serve` + +Check the official docs for [running TorchServe with docker](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). + +In order to run in GPU, you need to install [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). You can omit the `--gpus` argument in order to run in GPU. + +Example: + +```shell +docker run --rm \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=`realpath ./checkpoints`,target=/home/model-server/model-store \ +mmcls-serve:latest +``` + +```{note} +`realpath ./checkpoints` points to the absolute path of "./checkpoints", and you can replace it with the absolute path where you store torchserve models. +``` + +[Read the docs](https://github.com/pytorch/serve/blob/master/docs/rest_api.md) about the Inference (8080), Management (8081) and Metrics (8082) APis + +## 4. Test deployment + +```shell +curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T demo/demo.JPEG +``` + +You should obtain a response similar to: + +```json +{ + "pred_label": 58, + "pred_score": 0.38102269172668457, + "pred_class": "water snake" +} +``` + +And you can use `test_torchserver.py` to compare result of TorchServe and PyTorch, and visualize them. + +```shell +python tools/deployment/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] +``` + +Example: + +```shell +python tools/deployment/test_torchserver.py \ + demo/demo.JPEG \ + configs/resnet/resnet18_b32x8_imagenet.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + resnet18_in1k +``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/onnx2tensorrt.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/onnx2tensorrt.md new file mode 100644 index 0000000000..7869dcf24c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/onnx2tensorrt.md @@ -0,0 +1,80 @@ +# ONNX to TensorRT (Experimental) + + + +- [ONNX to TensorRT (Experimental)](#onnx-to-tensorrt-experimental) + - [How to convert models from ONNX to TensorRT](#how-to-convert-models-from-onnx-to-tensorrt) + - [Prerequisite](#prerequisite) + - [Usage](#usage) + - [List of supported models convertible to TensorRT](#list-of-supported-models-convertible-to-tensorrt) + - [Reminders](#reminders) + - [FAQs](#faqs) + + + +## How to convert models from ONNX to TensorRT + +### Prerequisite + +1. Please refer to [install.md](https://mmclassification.readthedocs.io/en/latest/install.html#install-mmclassification) for installation of MMClassification from source. +2. Use our tool [pytorch2onnx.md](./pytorch2onnx.md) to convert the model from PyTorch to ONNX. + +### Usage + +```bash +python tools/deployment/onnx2tensorrt.py \ + ${MODEL} \ + --trt-file ${TRT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --max-batch-size ${MAX_BATCH_SIZE} \ + --workspace-size ${WORKSPACE_SIZE} \ + --fp16 \ + --show \ + --verify \ +``` + +Description of all arguments: + +- `model` : The path of an ONNX model file. 
+- `--trt-file`: The Path of output TensorRT engine file. If not specified, it will be set to `tmp.trt`. +- `--shape`: The height and width of model input. If not specified, it will be set to `224 224`. +- `--max-batch-size`: The max batch size of TensorRT model, should not be less than 1. +- `--fp16`: Enable fp16 mode. +- `--workspace-size` : The required GPU workspace size in GiB to build TensorRT engine. If not specified, it will be set to `1` GiB. +- `--show`: Determines whether to show the outputs of the model. If not specified, it will be set to `False`. +- `--verify`: Determines whether to verify the correctness of models between ONNXRuntime and TensorRT. If not specified, it will be set to `False`. + +Example: + +```bash +python tools/deployment/onnx2tensorrt.py \ + checkpoints/resnet/resnet18_b16x8_cifar10.onnx \ + --trt-file checkpoints/resnet/resnet18_b16x8_cifar10.trt \ + --shape 224 224 \ + --show \ + --verify \ +``` + +## List of supported models convertible to TensorRT + +The table below lists the models that are guaranteed to be convertible to TensorRT. + +| Model | Config | Status | +| :----------: | :--------------------------------------------------------------------------: | :----: | +| MobileNetV2 | `configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py` | Y | +| ResNet | `configs/resnet/resnet18_b16x8_cifar10.py` | Y | +| ResNeXt | `configs/resnext/resnext50_32x4d_b32x8_imagenet.py` | Y | +| ShuffleNetV1 | `configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py` | Y | +| ShuffleNetV2 | `configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py` | Y | + +Notes: + +- *All models above are tested with Pytorch==1.6.0 and TensorRT-7.2.1.6.Ubuntu-16.04.x86_64-gnu.cuda-10.2.cudnn8.0* + +## Reminders + +- If you meet any problem with the listed models above, please create an issue and it would be taken care of soon. For models not included in the list, we may not provide much help here due to the limited resources. Please try to dig a little deeper and debug by yourself. + +## FAQs + +- None diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2onnx.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2onnx.md new file mode 100644 index 0000000000..b64aadf41d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2onnx.md @@ -0,0 +1,204 @@ +# Pytorch to ONNX (Experimental) + + + +- [Pytorch to ONNX (Experimental)](#pytorch-to-onnx-experimental) + - [How to convert models from Pytorch to ONNX](#how-to-convert-models-from-pytorch-to-onnx) + - [Prerequisite](#prerequisite) + - [Usage](#usage) + - [Description of all arguments:](#description-of-all-arguments) + - [How to evaluate ONNX models with ONNX Runtime](#how-to-evaluate-onnx-models-with-onnx-runtime) + - [Prerequisite](#prerequisite-1) + - [Usage](#usage-1) + - [Description of all arguments](#description-of-all-arguments-1) + - [Results and Models](#results-and-models) + - [List of supported models exportable to ONNX](#list-of-supported-models-exportable-to-onnx) + - [Reminders](#reminders) + - [FAQs](#faqs) + + + +## How to convert models from Pytorch to ONNX + +### Prerequisite + +1. Please refer to [install](https://mmclassification.readthedocs.io/en/latest/install.html#install-mmclassification) for installation of MMClassification. +2. 
Install onnx and onnxruntime + + ```shell + pip install onnx onnxruntime==1.5.1 + ``` + +### Usage + +```bash +python tools/deployment/pytorch2onnx.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${OUTPUT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --opset-version ${OPSET_VERSION} \ + --dynamic-export \ + --show \ + --simplify \ + --verify \ +``` + +### Description of all arguments: + +- `config` : The path of a model config file. +- `--checkpoint` : The path of a model checkpoint file. +- `--output-file`: The path of output ONNX model. If not specified, it will be set to `tmp.onnx`. +- `--shape`: The height and width of input tensor to the model. If not specified, it will be set to `224 224`. +- `--opset-version` : The opset version of ONNX. If not specified, it will be set to `11`. +- `--dynamic-export` : Determines whether to export ONNX with dynamic input shape and output shapes. If not specified, it will be set to `False`. +- `--show`: Determines whether to print the architecture of the exported model. If not specified, it will be set to `False`. +- `--simplify`: Determines whether to simplify the exported ONNX model. If not specified, it will be set to `False`. +- `--verify`: Determines whether to verify the correctness of an exported model. If not specified, it will be set to `False`. + +Example: + +```bash +python tools/deployment/pytorch2onnx.py \ + configs/resnet/resnet18_b16x8_cifar10.py \ + --checkpoint checkpoints/resnet/resnet18_b16x8_cifar10.pth \ + --output-file checkpoints/resnet/resnet18_b16x8_cifar10.onnx \ + --dynamic-export \ + --show \ + --simplify \ + --verify \ +``` + +## How to evaluate ONNX models with ONNX Runtime + +We prepare a tool `tools/deployment/test.py` to evaluate ONNX models with ONNXRuntime or TensorRT. + +### Prerequisite + +- Install onnx and onnxruntime-gpu + + ```shell + pip install onnx onnxruntime-gpu + ``` + +### Usage + +```bash +python tools/deployment/test.py \ + ${CONFIG_FILE} \ + ${ONNX_FILE} \ + --backend ${BACKEND} \ + --out ${OUTPUT_FILE} \ + --metrics ${EVALUATION_METRICS} \ + --metric-options ${EVALUATION_OPTIONS} \ + --show + --show-dir ${SHOW_DIRECTORY} \ + --cfg-options ${CFG_OPTIONS} \ +``` + +### Description of all arguments + +- `config`: The path of a model config file. +- `model`: The path of a ONNX model file. +- `--backend`: Backend for input model to run and should be `onnxruntime` or `tensorrt`. +- `--out`: The path of output result file in pickle format. +- `--metrics`: Evaluation metrics, which depends on the dataset, e.g., "accuracy", "precision", "recall", "f1_score", "support" for single label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for multi-label dataset. +- `--show`: Determines whether to show classifier outputs. If not specified, it will be set to `False`. +- `--show-dir`: Directory where painted images will be saved +- `--metrics-options`: Custom options for evaluation, the key-value pair in `xxx=yyy` format will be kwargs for `dataset.evaluate()` function +- `--cfg-options`: Override some settings in the used config file, the key-value pair in `xxx=yyy` format will be merged into config file. + +### Results and Models + +This part selects ImageNet for onnxruntime verification. ImageNet has multiple versions, but the most commonly used one is [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Model | Config | Metric | PyTorch | ONNXRuntime | TensorRT-fp32 | TensorRT-fp16 |
+| :----------: | :--------------------------------------------------: | :-------: | :-----------: | :-----------: | :-----------: | :-----------: |
+| ResNet | resnet50_b32x8_imagenet.py | Top 1 / 5 | 76.55 / 93.15 | 76.49 / 93.22 | 76.49 / 93.22 | 76.50 / 93.20 |
+| ResNeXt | resnext50_32x4d_b32x8_imagenet.py | Top 1 / 5 | 77.90 / 93.66 | 77.90 / 93.66 | 77.90 / 93.66 | 77.89 / 93.65 |
+| SE-ResNet | seresnet50_b32x8_imagenet.py | Top 1 / 5 | 77.74 / 93.84 | 77.74 / 93.84 | 77.74 / 93.84 | 77.74 / 93.85 |
+| ShuffleNetV1 | shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py | Top 1 / 5 | 68.13 / 87.81 | 68.13 / 87.81 | 68.13 / 87.81 | 68.10 / 87.80 |
+| ShuffleNetV2 | shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py | Top 1 / 5 | 69.55 / 88.92 | 69.55 / 88.92 | 69.55 / 88.92 | 69.55 / 88.92 |
+| MobileNetV2 | mobilenet_v2_b32x8_imagenet.py | Top 1 / 5 | 71.86 / 90.42 | 71.86 / 90.42 | 71.86 / 90.42 | 71.88 / 90.40 |
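+
+If you only want a quick sanity check of an exported model outside of `tools/deployment/test.py`, you can also run it directly with ONNX Runtime. The snippet below is a minimal sketch rather than a repository tool; the file name `tmp.onnx` and the `(1, 3, 224, 224)` input shape simply follow the defaults documented above and should be adjusted to your export.
+
+```python
+# Minimal sketch: run an exported classifier with ONNX Runtime on a dummy input.
+# 'tmp.onnx' is the default output path of pytorch2onnx.py; change it as needed.
+import numpy as np
+import onnxruntime as ort
+
+sess = ort.InferenceSession('tmp.onnx', providers=['CPUExecutionProvider'])
+input_name = sess.get_inputs()[0].name
+dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)   # N, C, H, W
+scores = sess.run(None, {input_name: dummy})[0]              # class scores
+print('predicted class index:', int(scores.argmax(axis=1)[0]))
+```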
+ +## List of supported models exportable to ONNX + +The table below lists the models that are guaranteed to be exportable to ONNX and runnable in ONNX Runtime. + +| Model | Config | Batch Inference | Dynamic Shape | Note | +| :----------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------: | :-----------: | ---- | +| MobileNetV2 | [mobilenet_v2_b32x8_imagenet.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py) | Y | Y | | +| ResNet | [resnet18_b16x8_cifar10.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet/resnet18_b16x8_cifar10.py) | Y | Y | | +| ResNeXt | [resnext50_32x4d_b32x8_imagenet.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext/resnext50_32x4d_b32x8_imagenet.py) | Y | Y | | +| SE-ResNet | [seresnet50_b32x8_imagenet.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet/seresnet50_b32x8_imagenet.py) | Y | Y | | +| ShuffleNetV1 | [shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py) | Y | Y | | +| ShuffleNetV2 | [shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py) | Y | Y | | + +Notes: + +- *All models above are tested with Pytorch==1.6.0* + +## Reminders + +- If you meet any problem with the listed models above, please create an issue and it would be taken care of soon. For models not included in the list, please try to dig a little deeper and debug a little bit more and hopefully solve them by yourself. + +## FAQs + +- None diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2torchscript.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2torchscript.md new file mode 100644 index 0000000000..cbe8da9c46 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/pytorch2torchscript.md @@ -0,0 +1,56 @@ +# Pytorch to TorchScript (Experimental) + + + +- [Pytorch to TorchScript (Experimental)](#pytorch-to-torchscript-experimental) + - [How to convert models from Pytorch to TorchScript](#how-to-convert-models-from-pytorch-to-torchscript) + - [Usage](#usage) + - [Description of all arguments](#description-of-all-arguments) + - [Reminders](#reminders) + - [FAQs](#faqs) + + + +## How to convert models from Pytorch to TorchScript + +### Usage + +```bash +python tools/deployment/pytorch2torchscript.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${OUTPUT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --verify \ +``` + +### Description of all arguments + +- `config` : The path of a model config file. +- `--checkpoint` : The path of a model checkpoint file. +- `--output-file`: The path of output TorchScript model. If not specified, it will be set to `tmp.pt`. +- `--shape`: The height and width of input tensor to the model. If not specified, it will be set to `224 224`. +- `--verify`: Determines whether to verify the correctness of an exported model. If not specified, it will be set to `False`. 
+
+Example:
+
+```bash
+python tools/deployment/pytorch2torchscript.py \
+    configs/resnet/resnet18_b16x8_cifar10.py \
+    --checkpoint checkpoints/resnet/resnet18_b16x8_cifar10.pth \
+    --output-file checkpoints/resnet/resnet18_b16x8_cifar10.pt \
+    --verify \
+```
+
+Notes:
+
+- *All models are tested with Pytorch==1.8.1*
+
+## Reminders
+
+- `torch.jit.is_tracing()` is only supported after v1.6. For users with PyTorch v1.3-v1.5, we suggest returning tensors early manually.
+- If you meet any problem with the models in this repo, please create an issue and it would be taken care of soon.
+
+## FAQs
+
+- None
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/visualization.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/visualization.md
new file mode 100644
index 0000000000..9bf146193d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tools/visualization.md
@@ -0,0 +1,81 @@
+# Visualization
+
+
+
+- [Visualization](#visualization)
+  - [Pipeline Visualization](#pipeline-visualization)
+    - [Usage](#usage)
+  - [FAQs](#faqs)
+
+
+## Pipeline Visualization
+
+### Usage
+
+```bash
+python tools/visualizations/vis_pipeline.py \
+    ${CONFIG_FILE} \
+    --output-dir ${OUTPUT_DIR} \
+    --phase ${DATASET_PHASE} \
+    --number ${NUMBER_IMAGES_DISPLAY} \
+    --skip-type ${SKIP_TRANSFORM_TYPE} \
+    --mode ${DISPLAY_MODE} \
+    --show \
+    --adaptive \
+    --min-edge-length ${MIN_EDGE_LENGTH} \
+    --max-edge-length ${MAX_EDGE_LENGTH} \
+    --bgr2rgb \
+    --window-size ${WINDOW_SIZE}
+```
+
+**Description of all arguments**:
+
+- `config` : The path of a model config file.
+- `--output-dir`: The output path for visualized images. If not specified, it will be set to `''`, which means not to save.
+- `--phase`: The phase of the dataset to visualize, must be one of `[train, val, test]`. If not specified, it will be set to `train`.
+- `--number`: The number of samples to visualize. If not specified, display all images in the dataset.
+- `--skip-type`: The pipelines to be skipped. If not specified, it will be set to `['ToTensor', 'Normalize', 'ImageToTensor', 'Collect']`.
+- `--mode`: The display mode, can be one of `[original, pipeline, concat]`. If not specified, it will be set to `concat`.
+- `--show`: If set, display pictures in pop-up windows.
+- `--adaptive`: If set, automatically adjust the size of the visualization images.
+- `--min-edge-length`: The minimum edge length, used when `--adaptive` is set. When any side of the picture is smaller than `${MIN_EDGE_LENGTH}`, the picture will be enlarged while keeping the aspect ratio unchanged, and the short side will be aligned to `${MIN_EDGE_LENGTH}`. If not specified, it will be set to 200.
+- `--max-edge-length`: The maximum edge length, used when `--adaptive` is set. When any side of the picture is larger than `${MAX_EDGE_LENGTH}`, the picture will be reduced while keeping the aspect ratio unchanged, and the long side will be aligned to `${MAX_EDGE_LENGTH}`. If not specified, it will be set to 1000.
+- `--bgr2rgb`: If set, flip the color channel order of images.
+- `--window-size`: The shape of the display window. If not specified, it will be set to `12*7`. If used, it must be in the format `'W*H'`.
+
+```{note}
+
+1. If `--mode` is not specified, it defaults to `concat`, which shows the original pictures and the transformed pictures stitched together; if `--mode` is set to `original`, only the original pictures are shown; if `--mode` is set to `pipeline`, only the transformed pictures are shown.
+
+2. 
When the `--adaptive` option is set, images that are too large or too small will be resized automatically; you can use `--min-edge-length` and `--max-edge-length` to control the target sizes.
+```
+
+**Examples**:
+
+1. Visualize all the transformed pictures of the `ImageNet` training set and display them in pop-up windows:
+
+```shell
+python ./tools/visualizations/vis_pipeline.py ./configs/resnet/resnet50_b32x8_imagenet.py --show --mode pipeline
+```

+
+2. Visualize 10 comparison pictures in the `ImageNet` train set and save them in the `./tmp` folder:
+
+```shell
+python ./tools/visualizations/vis_pipeline.py configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py --phase train --output-dir tmp --number 10 --adaptive
+```
+
+3. Visualize 100 original pictures in the `CIFAR100` validation set, then display and save them in the `./tmp` folder:
+
+```shell
+python ./tools/visualizations/vis_pipeline.py configs/resnet/resnet50_b16x8_cifar100.py --phase val --output-dir tmp --mode original --number 100 --show --adaptive --bgr2rgb
+```
+ +## FAQs + +- None diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/config.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/config.md new file mode 100644 index 0000000000..46c4c0257a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/config.md @@ -0,0 +1,403 @@ +# Tutorial 1: Learn about Configs + +MMClassification mainly uses python files as configs. The design of our configuration file system integrates modularity and inheritance, facilitating users to conduct various experiments. All configuration files are placed in the `configs` folder, which mainly contains the primitive configuration folder of `_base_` and many algorithm folders such as `resnet`, `swin_transformer`, `vision_transformer`, etc. + +If you wish to inspect the config file, you may run `python tools/analysis/print_config.py /PATH/TO/CONFIG` to see the complete config. + + + +- [Config File and Checkpoint Naming Convention](#config-file-and-checkpoint-naming-convention) +- [Config File Structure](#config-file-structure) +- [Inherit and Modify Config File](#inherit-and-modify-config-file) + - [Use intermediate variables in configs](#use-intermediate-variables-in-configs) + - [Ignore some fields in the base configs](#ignore-some-fields-in-the-base-configs) + - [Use some fields in the base configs](#use-some-fields-in-the-base-configs) +- [Modify config through script arguments](#modify-config-through-script-arguments) +- [Import user-defined modules](#import-ser-defined-modules) +- [FAQ](#faq) + + + +## Config File and Checkpoint Naming Convention + +We follow the below convention to name config files. Contributors are advised to follow the same style. The config file names are divided into four parts: algorithm info, module information, training information and data information. Logically, different parts are concatenated by underscores `'_'`, and words in the same part are concatenated by dashes `'-'`. + +``` +{algorithm info}_{module info}_{training info}_{data info}.py +``` + +- `algorithm info`:algorithm information, model name and neural network architecture, such as resnet, etc.; +- `module info`: module information is used to represent some special neck, head and pretrain information; +- `training info`:Training information, some training schedule, including batch size, lr schedule, data augment and the like; +- `data info`:Data information, dataset name, input size and so on, such as imagenet, cifar, etc.; + +### Algorithm information +The main algorithm name and the corresponding branch architecture information. E.g: +- `resnet50` +- `mobilenet-v3-large` +- `vit-small-patch32` : `patch32` represents the size of the partition in `ViT` algorithm; +- `seresnext101-32x4d` : `SeResNet101` network structure, `32x4d` means that `groups` and `width_per_group` are 32 and 4 respectively in `Bottleneck`; + +### Module information +Some special `neck`, `head` and `pretrain` information. 
In classification tasks, `pretrain` information is the most commonly used: +- `in21k-pre` : pre-trained on ImageNet21k; +- `in21k-pre-3rd-party` : pre-trained on ImageNet21k and the checkpoint is converted from a third-party repository; + +### Training information +Training schedule, including training type, `batch size`, `lr schedule`, data augment, special loss functions and so on: +- format `{gpu x batch_per_gpu}`, such as `8xb32` + +Training type (mainly seen in the transformer network, such as the `ViT` algorithm, which is usually divided into two training type: pre-training and fine-tuning): +- `ft` : configuration file for fine-tuning +- `pt` : configuration file for pretraining + +Training recipe. Usually, only the part that is different from the original paper will be marked. These methods will be arranged in the order `{pipeline aug}-{train aug}-{loss trick}-{scheduler}-{epochs}`. +- `coslr-200e` : use cosine scheduler to train 200 epochs +- `autoaug-mixup-lbs-coslr-50e` : use `autoaug`, `mixup`, `label smooth`, `cosine scheduler` to train 50 epochs + +### Data information +- `in1k` : `ImageNet1k` dataset, default to use the input image size of 224x224; +- `in21k` : `ImageNet21k` dataset, also called `ImageNet22k` dataset, default to use the input image size of 224x224; +- `in1k-384px` : Indicates that the input image size is 384x384; +- `cifar100` + +### Config File Name Example + +``` +repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py +``` + +- `repvgg-D2se`: Algorithm information + + `repvgg`: The main algorithm. + + `D2se`: The architecture. +- `deploy`: Module information, means the backbone is in the deploy state. +- `4xb64-autoaug-lbs-mixup-coslr-200e`: Training information. + + `4xb64`: Use 4 GPUs and the size of batches per GPU is 64. + + `autoaug`: Use `AutoAugment` in training pipeline. + + `lbs`: Use label smoothing loss. + + `mixup`: Use `mixup` training augment method. + + `coslr`: Use cosine learning rate scheduler. + + `200e`: Train the model for 200 epoches. +- `in1k`: Dataset information. The config is for `ImageNet1k` dataset and the input size is `224x224`. + +```{note} +Some configuration files currently do not follow this naming convention, and related files will be updated in the near future. +``` + +### Checkpoint Naming Convention + +The naming of the weight mainly includes the configuration file name, date and hash value. + +``` +{config_name}_{date}-{hash}.pth +``` + +## Config File Structure + +There are four kinds of basic component file in the `configs/_base_` folders, namely: + +- [models](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/models) +- [datasets](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/datasets) +- [schedules](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/schedules) +- [runtime](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/default_runtime.py) + +You can easily build your own training config file by inherit some base config files. And the configs that are composed by components from `_base_` are called _primitive_. + +For easy understanding, we use [ResNet50 primitive config](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b32x8_imagenet.py) as a example and comment the meaning of each line. For more detaile, please refer to the API documentation. 
+ +```python +_base_ = [ + '../_base_/models/resnet50.py', # model + '../_base_/datasets/imagenet_bs32.py', # data + '../_base_/schedules/imagenet_bs256.py', # training schedule + '../_base_/default_runtime.py' # runtime setting +] +``` + +The four parts are explained separately below, and the above-mentioned ResNet50 primitive config are also used as an example. + +### model +The parameter `"model"` is a python dictionary in the configuration file, which mainly includes information such as network structure and loss function: +- `type` : Classifier name, MMCls supports `ImageClassifier`, refer to [API document](https://mmclassification.readthedocs.io/en/latest/api.html#module-mmcls.models.classifiers). +- `backbone` : Backbone configs, refer to [API document](https://mmclassification.readthedocs.io/en/latest/api.html#module-mmcls.models.backbones) for available options. +- `neck` :Neck network name, MMCls supports `GlobalAveragePooling`, please refer to [API document](https://mmclassification.readthedocs.io/en/latest/api.html#module-mmcls.models.necks). +- `head`: Head network name, MMCls supports single-label and multi-label classification head networks, available options refer to [API document](https://mmclassification.readthedocs.io/en/latest/api.html#module-mmcls.models.heads). + - `loss`: Loss function type, supports `CrossEntropyLoss`, [`LabelSmoothLoss`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_label_smooth.py) etc., For available options, refer to [API Document](https://mmclassification.readthedocs.io/en/latest/api.html#module-mmcls.models.losses). +- `train_cfg` :Training augment config, MMCls supports [`mixup`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_mixup.py), [`cutmix`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_cutmix.py) and other augments. + + +```{note} +The 'type' in the configuration file is not a constructed parameter, but a class name. +``` + +```python +model = dict( + type='ImageClassifier', # Classifier name + backbone=dict( + type='ResNet', # Backbones name + depth=50, # depth of backbone, ResNet has options of 18, 34, 50, 101, 152. + num_stages=4, # number of stages,The feature maps generated by these states are used as the input for the subsequent neck and head. + out_indices=(3, ), # The output index of the output feature maps. + frozen_stages=-1, # the stage to be frozen, '-1' means not be forzen + style='pytorch'), # The style of backbone, 'pytorch' means that stride 2 layers are in 3x3 conv, 'caffe' means stride 2 layers are in 1x1 convs. 
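+    # With the default 224x224 ImageNet input, the stage selected by out_indices=(3, )
+    # yields a (N, 2048, 7, 7) feature map; the GlobalAveragePooling neck below reduces
+    # it to (N, 2048), which is why the head is configured with in_channels=2048.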
+ neck=dict(type='GlobalAveragePooling'), # neck network name + head=dict( + type='LinearClsHead', # linear classification head, + num_classes=1000, # The number of output categories, consistent with the number of categories in the dataset + in_channels=2048, # The number of input channels, consistent with the output channel of the neck + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), # Loss function configuration information + topk=(1, 5), # Evaluation index, Top-k accuracy rate, here is the accuracy rate of top1 and top5 + )) +``` + +### data +The parameter `"data"` is a python dictionary in the configuration file, which mainly includes information to construct dataloader: +- `samples_per_gpu` : the BatchSize of each GPU when building the dataloader +- `workers_per_gpu` : the number of threads per GPU when building dataloader +- `train | val | test` : config to construct dataset + - `type`: Dataset name, MMCls supports `ImageNet`, `Cifar` etc., refer to [API documentation](https://mmclassification.readthedocs.io/en/latest/api.html#module-mmcls.datasets) + - `data_prefix` : Dataset root directory + - `pipeline` : Data processing pipeline, refer to related tutorial documents [CUSTOM DATA PIPELINES](https://mmclassification.readthedocs.io/en/latest/tutorials/data_pipeline.html) + +The parameter `evaluation` is also a dictionary, which is the configuration information of `evaluation hook`, mainly including evaluation interval, evaluation index, etc.. + +```python +# dataset settings +dataset_type = 'ImageNet' # dataset name, +img_norm_cfg = dict( # Image normalization config to normalize the input images + mean=[123.675, 116.28, 103.53], # Mean values used to pre-training the pre-trained backbone models + std=[58.395, 57.12, 57.375], # Standard variance used to pre-training the pre-trained backbone models + to_rgb=True) # Whether to invert the color channel, rgb2bgr or bgr2rgb. 
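+# Note: the mean/std above are on the 0-255 scale and listed in RGB order. Images are
+# loaded in BGR order (the OpenCV convention) by LoadImageFromFile, so `to_rgb=True`
+# makes the Normalize step convert them to RGB first so that these statistics line up.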
+# train data pipeline +train_pipeline = [ + dict(type='LoadImageFromFile'), # First pipeline to load images from file path + dict(type='RandomResizedCrop', size=224), # RandomResizedCrop + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), # Randomly flip the picture horizontally with a probability of 0.5 + dict(type='Normalize', **img_norm_cfg), # normalization + dict(type='ImageToTensor', keys=['img']), # convert image from numpy into torch.Tensor + dict(type='ToTensor', keys=['gt_label']), # convert gt_label into torch.Tensor + dict(type='Collect', keys=['img', 'gt_label']) # Pipeline that decides which keys in the data should be passed to the detector +] +# test data pipeline +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) # do not pass gt_label while testing +] +data = dict( + samples_per_gpu=32, # Batch size of a single GPU + workers_per_gpu=2, # Worker to pre-fetch data for each single GPU + train=dict( # Train dataset config + train=dict( # train data config + type=dataset_type, # dataset name + data_prefix='data/imagenet/train', # Dataset root, when ann_file does not exist, the category information is automatically obtained from the root folder + pipeline=train_pipeline), # train data pipeline + val=dict( # val data config + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', # ann_file existes, the category information is obtained from file + pipeline=test_pipeline), + test=dict( # test data config + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict( # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details. + interval=1, # Evaluation interval + metric='accuracy') # Metrics used during evaluation +``` + +### training schedule +Mainly include optimizer settings, `optimizer hook` settings, learning rate schedule and `runner` settings: +- `optimizer`: optimizer setting , support all optimizers in `pytorch`, refer to related [mmcv](https://mmcv.readthedocs.io/en/latest/_modules/mmcv/runner/optimizer/default_constructor.html#DefaultOptimizerConstructor) documentation. +- `optimizer_config`: `optimizer hook` configuration file, such as setting gradient limit, refer to related [mmcv](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8) code. +- `lr_config`: Learning rate scheduler, supports "CosineAnnealing", "Step", "Cyclic", etc. refer to related [mmcv](https://mmcv.readthedocs.io/en/latest/_modules/mmcv/runner/hooks/lr_updater.html#LrUpdaterHook) documentation for more options. +- `runner`: For `runner`, please refer to `mmcv` for [`runner`](https://mmcv.readthedocs.io/en/latest/understand_mmcv/runner.html) introduction document. + +```python +# he configuration file used to build the optimizer, support all optimizers in PyTorch. 
+optimizer = dict(type='SGD', # Optimizer type + lr=0.1, # Learning rate of optimizers, see detail usages of the parameters in the documentation of PyTorch + momentum=0.9, # Momentum + weight_decay=0.0001) # Weight decay of SGD +# Config used to build the optimizer hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8 for implementation details. +optimizer_config = dict(grad_clip=None) # Most of the methods do not use gradient clip +# Learning rate scheduler config used to register LrUpdater hook +lr_config = dict(policy='step', # The policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9. + step=[30, 60, 90]) # Steps to decay the learning rate +runner = dict(type='EpochBasedRunner', # Type of runner to use (i.e. IterBasedRunner or EpochBasedRunner) + max_epochs=100) # Runner that runs the workflow in total max_epochs. For IterBasedRunner use `max_iters` +``` + +### runtime setting + +This part mainly includes saving the checkpoint strategy, log configuration, training parameters, breakpoint weight path, working directory, etc.. + +```python +# Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation. +checkpoint_config = dict(interval=1) # The save interval is 1 +# config to register logger hook +log_config = dict( + interval=100, # Interval to print the log + hooks=[ + dict(type='TextLoggerHook'), # The Tensorboard logger is also supported + # dict(type='TensorboardLoggerHook') + ]) + +dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set. +log_level = 'INFO' # The output level of the log. +resume_from = None # Resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint's is saved. +workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. +work_dir = 'work_dir' # Directory to save the model checkpoints and logs for the current experiments. +``` + +## Inherit and Modify Config File + +For easy understanding, we recommend contributors to inherit from existing methods. + +For all configs under the same folder, it is recommended to have only **one** _primitive_ config. All other configs should inherit from the _primitive_ config. In this way, the maximum of inheritance level is 3. + +For example, if some modifications are made on the basis of ResNet, the user can first inherit the basic ResNet structure, dataset and other training setting by specifying `_base_ ='../../configs/resnet/resnet50_b32x8_imagenet.py'` Information, and then modify the necessary parameters in the configuration file to complete the inheritance. 
If you want to change the number of training rounds from 100 to 300 epoches based on the basic resnet50, modify the number of learning rate decay rounds, and modify the data set path at the same time, you can create a new configuration file `configs/resnet/resnet50_8xb32-300e_in1k.py`, file write the following in: + +```python +_base_ = '../../configs/resnet/resnet50_b32x8_imagenet.py' + +runner = dict(max_epochs=300) +lr_config = dict(step=[150, 200, 250]) + +data = dict( + train=dict(data_prefix='mydata/imagenet/train'), + val=dict(data_prefix='mydata/imagenet/train', ), + test=dict(data_prefix='mydata/imagenet/train', ) +) +``` + +### Use intermediate variables in configs + +Some intermediate variables are used in the configuration file. The intermediate variables make the configuration file clearer and easier to modify. + +For example, `train_pipeline` / `test_pipeline` is the intermediate variable of the data pipeline. We first need to define `train_pipeline` / `test_pipeline`, and then pass them to `data`. If you want to modify the size of the input image during training and testing, you need to modify the intermediate variables of `train_pipeline` / `test_pipeline`. + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow',), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=384, backend='pillow'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +``` + +### Ignore some fields in the base configs + +Sometimes, you need to set `_delete_=True` to ignore some domain content in the basic configuration file. You can refer to [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html#inherit-from-base-config-with-ignored-fields) for more instructions. + +The following is an example. If you wangt to use cosine schedule in the above ResNet50 case, just using inheritance and directly modify it will report `get unexcepected keyword'step'` error, because the `'step'` field of the basic config in `lr_config` domain information is reserved, and you need to add `_delete_ =True` to ignore the content of `lr_config` related fields in the basic configuration file: + +```python +_base_ = '../../configs/resnet/resnet50_b32x8_imagenet.py' + +lr_config = dict( + _delete_=True, + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + by_epoch=True, + warmup_iters=5, + warmup_ratio=0.1 +) +``` + +### Use some fields in the base configs + +Sometimes, you may refer to some fields in the `_base_` config, so as to avoid duplication of definitions. You can refer to [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html#reference-variables-from-base) for some more instructions. 
+ +The following is an example of using auto augment in the training data preprocessing pipeline, refer to [`configs/_base_/datasets/imagenet_bs64_autoaug.py`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/datasets/imagenet_bs64_autoaug.py). When defining `train_pipeline`, just add the definition file name of auto augment to `_base_`, and then use `{{_base_.auto_increasing_policies}}` to reference the variables: + +```python +_base_ = ['./pipelines/auto_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies={{_base_.auto_increasing_policies}}), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [...] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict(..., pipeline=train_pipeline), + val=dict(..., pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') +``` + +## Modify config through script arguments + +When users use the script "tools/train.py" or "tools/test.py" to submit tasks or use some other tools, they can directly modify the content of the configuration file used by specifying the `--cfg-options` parameter. + +- Update config keys of dict chains. + + The config options can be specified following the order of the dict keys in the original config. + For example, `--cfg-options model.backbone.norm_eval=False` changes the all BN modules in model backbones to `train` mode. + +- Update keys inside a list of configs. + + Some config dicts are composed as a list in your config. For example, the training pipeline `data.train.pipeline` is normally a list + e.g. `[dict(type='LoadImageFromFile'), dict(type='TopDownRandomFlip', flip_prob=0.5), ...]`. If you want to change `'flip_prob=0.5'` to `'flip_prob=0.0'` in the pipeline, + you may specify `--cfg-options data.train.pipeline.1.flip_prob=0.0`. + +- Update values of list/tuples. + + If the value to be updated is a list or a tuple. For example, the config file normally sets `workflow=[('train', 1)]`. If you want to + change this key, you may specify `--cfg-options workflow="[(train,1),(val,1)]"`. Note that the quotation mark \" is necessary to + support list/tuple data types, and that **NO** white space is allowed inside the quotation marks in the specified value. + + +## Import user-defined modules + +```{note} +This part may only be used when using MMClassification as a third party library to build your own project, and beginners can skip it. +``` + +After studying the follow-up tutorials [ADDING NEW DATASET](https://mmclassification.readthedocs.io/en/latest/tutorials/new_dataset.html), [CUSTOM DATA PIPELINES](https://mmclassification.readthedocs.io/en/latest/tutorials/data_pipeline.html), [ADDING NEW MODULES](https://mmclassification.readthedocs.io/en/latest/tutorials/new_modules.html). You may use MMClassification to complete your project and create new classes of datasets, models, data enhancements, etc. in the project. In order to streamline the code, you can use MMClassification as a third-party library, you just need to keep your own extra code and import your own custom module in the configuration files. 
For examples, you may refer to [OpenMMLab Algorithm Competition Project](https://github.com/zhangrui-wolf/openmmlab-competition-2021) . + +Add the following code to your own configuration files: + +```python +custom_imports = dict( + imports=['your_dataset_class', + 'your_transforme_class', + 'your_model_class', + 'your_module_class'], + allow_failed_imports=False) +``` + +## FAQ +- None diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/data_pipeline.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/data_pipeline.md new file mode 100644 index 0000000000..cb43d70d89 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/data_pipeline.md @@ -0,0 +1,148 @@ +# Tutorial 4: Custom Data Pipelines + +## Design of Data pipelines + +Following typical conventions, we use `Dataset` and `DataLoader` for data loading +with multiple workers. Indexing `Dataset` returns a dict of data items corresponding to +the arguments of models forward method. + +The data preparation pipeline and the dataset is decomposed. Usually a dataset +defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict. +A pipeline consists of a sequence of operations. Each operation takes a dict as input and also output a dict for the next transform. + +The operations are categorized into data loading, pre-processing and formatting. + +Here is an pipeline example for ResNet-50 training on ImageNet. + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=256), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +``` + +For each operation, we list the related dict fields that are added/updated/removed. +At the end of the pipeline, we use `Collect` to only retain the necessary items for forward computation. + +### Data loading + +`LoadImageFromFile` + +- add: img, img_shape, ori_shape + +By default, `LoadImageFromFile` loads images from disk but it may lead to IO bottleneck for efficient small models. +Various backends are supported by mmcv to accelerate this process. For example, if the training machines have setup +[memcached](https://memcached.org/), we can revise the config as follows. + +``` +memcached_root = '/mnt/xxx/memcached_client/' +train_pipeline = [ + dict( + type='LoadImageFromFile', + file_client_args=dict( + backend='memcached', + server_list_cfg=osp.join(memcached_root, 'server_list.conf'), + client_cfg=osp.join(memcached_root, 'client.conf'))), +] +``` + +More supported backends can be found in [mmcv.fileio.FileClient](https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py). 
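+
+To make the dict-in/dict-out contract above concrete, here is a minimal, self-contained sketch (the `ToyLoad` and `ToyResize` classes below are illustrative stand-ins, not real MMClassification transforms): each step receives the `results` dict, adds or updates some keys, and returns it for the next step, just as `LoadImageFromFile` adds `img`, `img_shape` and `ori_shape`.
+
+```python
+import numpy as np
+
+
+class ToyLoad:
+    """Mimics a loading step: adds the `img`, `img_shape` and `ori_shape` keys."""
+
+    def __call__(self, results):
+        img = np.zeros((256, 256, 3), dtype=np.uint8)  # stand-in for reading a file
+        results['img'] = img
+        results['img_shape'] = img.shape
+        results['ori_shape'] = img.shape
+        return results
+
+
+class ToyResize:
+    """Mimics a pre-processing step: updates `img` and `img_shape`."""
+
+    def __init__(self, size):
+        self.size = size
+
+    def __call__(self, results):
+        h, w = self.size
+        results['img'] = results['img'][:h, :w]  # crude stand-in for a real resize
+        results['img_shape'] = results['img'].shape
+        return results
+
+
+pipeline = [ToyLoad(), ToyResize(size=(224, 224))]
+
+results = dict(img_info=dict(filename='demo.JPEG'))
+for transform in pipeline:
+    results = transform(results)  # every operation consumes and returns the same dict
+
+print(results['ori_shape'], results['img_shape'])  # (256, 256, 3) (224, 224, 3)
+```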
+ +### Pre-processing + +`Resize` + +- add: scale, scale_idx, pad_shape, scale_factor, keep_ratio +- update: img, img_shape + +`RandomFlip` + +- add: flip, flip_direction +- update: img + +`RandomCrop` + +- update: img, pad_shape + +`Normalize` + +- add: img_norm_cfg +- update: img + +### Formatting + +`ToTensor` + +- update: specified by `keys`. + +`ImageToTensor` + +- update: specified by `keys`. + +`Collect` + +- remove: all other keys except for those specified by `keys` + +## Extend and use custom pipelines + +1. Write a new pipeline in any file, e.g., `my_pipeline.py`, and place it in + the folder `mmcls/datasets/pipelines/`. The pipeline class needs to override + the `__call__` method which takes a dict as input and returns a dict. + + ```python + from mmcls.datasets import PIPELINES + + @PIPELINES.register_module() + class MyTransform(object): + + def __call__(self, results): + # apply transforms on results['img'] + return results + ``` + +2. Import the new class in `mmcls/datasets/pipelines/__init__.py`. + + ```python + ... + from .my_pipeline import MyTransform + + __all__ = [ + ..., 'MyTransform' + ] + ``` + +3. Use it in config files. + + ```python + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='MyTransform'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) + ] + ``` + +## Pipeline visualization + +After designing data pipelines, you can use the [visualization tools](../tools/visualization.md) to view the performance. diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/finetune.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/finetune.md new file mode 100644 index 0000000000..2e0b9155e2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/finetune.md @@ -0,0 +1,237 @@ +# Tutorial 2: Fine-tune Models + +Classification models pre-trained on the ImageNet dataset have been demonstrated to be effective for other datasets and other downstream tasks. +This tutorial provides instructions for users to use the models provided in the [Model Zoo](../model_zoo.md) for other datasets to obtain better performance. + +There are two steps to fine-tune a model on a new dataset. + +- Add support for the new dataset following [Tutorial 2: Adding New Dataset](new_dataset.md). +- Modify the configs as will be discussed in this tutorial. + +Assume we have a ResNet-50 model pre-trained on the ImageNet-2012 dataset and want +to take the fine-tuning on the CIFAR-10 dataset, we need to modify five parts in the +config. + +## Inherit base configs + +At first, create a new config file +`configs/tutorial/resnet50_finetune_cifar.py` to store our configs. Of course, +the path can be customized by yourself. + +To reuse the common parts among different configs, we support inheriting +configs from multiple existing configs. To fine-tune a ResNet-50 model, the new +config needs to inherit `configs/_base_/models/resnet50.py` to build the basic +structure of the model. To use the CIFAR-10 dataset, the new config can also +simply inherit `configs/_base_/datasets/cifar10_bs16.py`. 
For runtime settings such as +training schedules, the new config needs to inherit +`configs/_base_/default_runtime.py`. + +To inherit all above configs, put the following code at the config file. + +```python +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/cifar10_bs16.py', '../_base_/default_runtime.py' +] +``` + +Besides, you can also choose to write the whole contents rather than use inheritance, +like [`configs/lenet/lenet5_mnist.py`](https://github.com/open-mmlab/mmclassification/blob/master/configs/lenet/lenet5_mnist.py). + +## Modify model + +When fine-tuning a model, usually we want to load the pre-trained backbone +weights and train a new classification head. + +To load the pre-trained backbone, we need to change the initialization config +of the backbone and use `Pretrained` initialization function. Besides, in the +`init_cfg`, we use `prefix='backbone'` to tell the initialization +function to remove the prefix of keys in the checkpoint, for example, it will +change `backbone.conv1` to `conv1`. And here we use an online checkpoint, it +will be downloaded during training, you can also download the model manually +and use a local path. + +And then we need to modify the head according to the class numbers of the new +datasets by just changing `num_classes` in the head. + +```python +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +``` + +```{tip} +Here we only need to set the part of configs we want to modify, because the +inherited configs will be merged and get the entire configs. +``` + +Sometimes, we want to freeze the first several layers' parameters of the +backbone, that will help the network to keep ability to extract low-level +information learnt from pre-trained model. In MMClassification, you can simply +specify how many layers to freeze by `frozen_stages` argument. For example, to +freeze the first two layers' parameters, just use the following config: + +```python +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +``` + +```{note} +Not all backbones support the `frozen_stages` argument by now. Please check +[the docs](https://mmclassification.readthedocs.io/en/latest/api.html#module-mmcls.models.backbones) +to confirm if your backbone supports it. +``` + + +## Modify dataset + +When fine-tuning on a new dataset, usually we need to modify some dataset +configs. Here, we need to modify the pipeline to resize the image from 32 to +224 to fit the input size of the model pre-trained on ImageNet, and some other +configs. 
+ +```python +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False, +) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) +``` + +## Modify training schedule + +The fine-tuning hyper parameters vary from the default schedule. It usually +requires smaller learning rate and less training epochs. + +```python +# lr is set for a batch size of 128 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[15]) +runner = dict(type='EpochBasedRunner', max_epochs=200) +log_config = dict(interval=100) +``` + +## Start Training + +Now, we have finished the fine-tuning config file as following: + +```python +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/cifar10_bs16.py', '../_base_/default_runtime.py' +] + +# Model config +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# Dataset config +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False, +) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) + +# Training schedule config +# lr is set for a batch size of 128 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=[15]) +runner = dict(type='EpochBasedRunner', max_epochs=200) +log_config = dict(interval=100) +``` + +Here we use 8 GPUs on your computer to train the model with the following +command: + +```shell +bash tools/dist_train.sh configs/tutorial/resnet50_finetune_cifar.py 8 +``` + +Also, you can use only one GPU to train the model with the following command: + +```shell +python tools/train.py configs/tutorial/resnet50_finetune_cifar.py +``` + +But wait, an important config need to be changed if using one GPU. 
We need to +change the dataset config as following: + +```python +data = dict( + samples_per_gpu=128, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) +``` + +It's because our training schedule is for a batch size of 128. If using 8 GPUs, +just use `samples_per_gpu=16` config in the base config file, and the total batch +size will be 128. But if using one GPU, you need to change it to 128 manually to +match the training schedule. diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_dataset.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_dataset.md new file mode 100644 index 0000000000..a5068cedc7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_dataset.md @@ -0,0 +1,141 @@ +# Tutorial 3: Adding New Dataset + +## Customize datasets by reorganizing data + +### Reorganize dataset to existing format + +The simplest way is to convert your dataset to existing dataset formats (ImageNet). + +For training, it differentiates classes by folders. The directory of training data is as follows: + +``` +imagenet +├── ... +├── train +│ ├── n01440764 +│ │ ├── n01440764_10026.JPEG +│ │ ├── n01440764_10027.JPEG +│ │ ├── ... +│ ├── ... +│ ├── n15075141 +│ │ ├── n15075141_999.JPEG +│ │ ├── n15075141_9993.JPEG +│ │ ├── ... +``` + +For validation, we provide a annotation list. Each line of the list contrains a filename and its corresponding ground-truth labels. The format is as follows: + +``` +ILSVRC2012_val_00000001.JPEG 65 +ILSVRC2012_val_00000002.JPEG 970 +ILSVRC2012_val_00000003.JPEG 230 +ILSVRC2012_val_00000004.JPEG 809 +ILSVRC2012_val_00000005.JPEG 516 +``` + +Note: The value of ground-truth labels should fall in range `[0, num_classes - 1]`. + +### An example of customized dataset + +You can write a new Dataset class inherited from `BaseDataset`, and overwrite `load_annotations(self)`, +like [CIFAR10](https://github.com/open-mmlab/mmclassification/blob/master/mmcls/datasets/cifar.py) and [ImageNet](https://github.com/open-mmlab/mmclassification/blob/master/mmcls/datasets/imagenet.py). +Typically, this function returns a list, where each sample is a dict, containing necessary data information, e.g., `img` and `gt_label`. + +Assume we are going to implement a `Filelist` dataset, which takes filelists for both training and testing. The format of annotation list is as follows: + +``` +000001.jpg 0 +000002.jpg 1 +``` + +We can create a new dataset in `mmcls/datasets/filelist.py` to load the data. + +```python +import mmcv +import numpy as np + +from .builder import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class Filelist(BaseDataset): + + def load_annotations(self): + assert isinstance(self.ann_file, str) + + data_infos = [] + with open(self.ann_file) as f: + samples = [x.strip().split(' ') for x in f.readlines()] + for filename, gt_label in samples: + info = {'img_prefix': self.data_prefix} + info['img_info'] = {'filename': filename} + info['gt_label'] = np.array(gt_label, dtype=np.int64) + data_infos.append(info) + return data_infos + +``` + +And add this dataset class in `mmcls/datasets/__init__.py` + +```python +from .base_dataset import BaseDataset +... +from .filelist import Filelist + +__all__ = [ + 'BaseDataset', ... 
,'Filelist' +] +``` + +Then in the config, to use `Filelist` you can modify the config as the following + +```python +train = dict( + type='Filelist', + ann_file = 'image_list.txt', + pipeline=train_pipeline +) +``` + +## Customize datasets by mixing dataset + +MMClassification also supports to mix dataset for training. +Currently it supports to concat and repeat datasets. + +### Repeat dataset + +We use `RepeatDataset` as wrapper to repeat the dataset. For example, suppose the original dataset is `Dataset_A`, to repeat it, the config looks like the following + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( # This is the original config of Dataset_A + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +### Class balanced dataset + +We use `ClassBalancedDataset` as wrapper to repeat the dataset based on category +frequency. The dataset to repeat needs to instantiate function `self.get_cat_ids(idx)` +to support `ClassBalancedDataset`. +For example, to repeat `Dataset_A` with `oversample_thr=1e-3`, the config looks like the following + +```python +dataset_A_train = dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( # This is the original config of Dataset_A + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +You may refer to [source code](https://github.com/open-mmlab/mmclassification/tree/master/mmcls/datasets/dataset_wrappers.py) for details. diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_modules.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_modules.md new file mode 100644 index 0000000000..33bfde569f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs/tutorials/new_modules.md @@ -0,0 +1,272 @@ +# Tutorial 5: Adding New Modules + +## Develop new components + +We basically categorize model components into 3 types. + +- backbone: usually an feature extraction network, e.g., ResNet, MobileNet. +- neck: the component between backbones and heads, e.g., GlobalAveragePooling. +- head: the component for specific tasks, e.g., classification or regression. + +### Add new backbones + +Here we show how to develop new components with an example of ResNet_CIFAR. +As the input size of CIFAR is 32x32, this backbone replaces the `kernel_size=7, stride=2` to `kernel_size=3, stride=1` and remove the MaxPooling after stem, to avoid forwarding small feature maps to residual blocks. +It inherits from ResNet and only modifies the stem layers. + +1. Create a new file `mmcls/models/backbones/resnet_cifar.py`. + +```python +import torch.nn as nn + +from ..builder import BACKBONES +from .resnet import ResNet + + +@BACKBONES.register_module() +class ResNet_CIFAR(ResNet): + + """ResNet backbone for CIFAR. + + short description of the backbone + + Args: + depth(int): Network depth, from {18, 34, 50, 101, 152}. + ... 
+ """ + + def __init__(self, depth, deep_stem, **kwargs): + # call ResNet init + super(ResNet_CIFAR, self).__init__(depth, deep_stem=deep_stem, **kwargs) + # other specific initialization + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + # override ResNet method to modify the network structure + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): # should return a tuple + pass # implementation is ignored + + def init_weights(self, pretrained=None): + pass # override ResNet init_weights if necessary + + def train(self, mode=True): + pass # override ResNet train if necessary +``` + +2. Import the module in `mmcls/models/backbones/__init__.py`. + +```python +... +from .resnet_cifar import ResNet_CIFAR + +__all__ = [ + ..., 'ResNet_CIFAR' +] +``` + +3. Use it in your config file. + +```python +model = dict( + ... + backbone=dict( + type='ResNet_CIFAR', + depth=18, + other_arg=xxx), + ... +``` + +### Add new necks + +Here we take `GlobalAveragePooling` as an example. It is a very simple neck without any arguments. +To add a new neck, we mainly implement the `forward` function, which applies some operation on the output from backbone and forward the results to head. + +1. Create a new file in `mmcls/models/necks/gap.py`. + + ```python + import torch.nn as nn + + from ..builder import NECKS + + @NECKS.register_module() + class GlobalAveragePooling(nn.Module): + + def __init__(self): + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, inputs): + # we regard inputs as tensor for simplicity + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + return outs + ``` + +2. Import the module in `mmcls/models/necks/__init__.py`. + + ```python + ... + from .gap import GlobalAveragePooling + + __all__ = [ + ..., 'GlobalAveragePooling' + ] + ``` + +3. Modify the config file. + + ```python + model = dict( + neck=dict(type='GlobalAveragePooling'), + ) + ``` + +### Add new heads + +Here we show how to develop a new head with the example of `LinearClsHead` as the following. +To implement a new head, basically we need to implement `forward_train`, which takes the feature maps from necks or backbones as input and compute loss based on ground-truth labels. + +1. Create a new file in `mmcls/models/heads/linear_head.py`. + + ```python + from ..builder import HEADS + from .cls_head import ClsHead + + + @HEADS.register_module() + class LinearClsHead(ClsHead): + + def __init__(self, + num_classes, + in_channels, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, )): + super(LinearClsHead, self).__init__(loss=loss, topk=topk) + self.in_channels = in_channels + self.num_classes = num_classes + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self._init_layers() + + def _init_layers(self): + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def init_weights(self): + normal_init(self.fc, mean=0, std=0.01, bias=0) + + def forward_train(self, x, gt_label): + cls_score = self.fc(x) + losses = self.loss(cls_score, gt_label) + return losses + + ``` + +2. Import the module in `mmcls/models/heads/__init__.py`. + + ```python + ... 
+ from .linear_head import LinearClsHead + + __all__ = [ + ..., 'LinearClsHead' + ] + ``` + +3. Modify the config file. + +Together with the added GlobalAveragePooling neck, an entire config for a model is as follows. + +```python +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +``` + +### Add new loss + +To add a new loss function, we mainly implement the `forward` function in the loss module. +In addition, it is helpful to leverage the decorator `weighted_loss` to weight the loss for each element. +Assuming that we want to mimic a probabilistic distribution generated from another classification model, we implement a L1Loss to fulfil the purpose as below. + +1. Create a new file in `mmcls/models/losses/l1_loss.py`. + + ```python + import torch + import torch.nn as nn + + from ..builder import LOSSES + from .utils import weighted_loss + + @weighted_loss + def l1_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + @LOSSES.register_module() + class L1Loss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(L1Loss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * l1_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss + ``` + +2. Import the module in `mmcls/models/losses/__init__.py`. + + ```python + ... + from .l1_loss import L1Loss, l1_loss + + __all__ = [ + ..., 'L1Loss', 'l1_loss' + ] + ``` + +3. Modify loss field in the config. + + ```python + loss=dict(type='L1Loss', loss_weight=1.0)) + ``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/Makefile b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/Makefile new file mode 100644 index 0000000000..d4bb2cbb9e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/css/readthedocs.css b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/css/readthedocs.css
new file mode 100644
index 0000000000..0df0969129
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/css/readthedocs.css
@@ -0,0 +1,16 @@
+.header-logo {
+    background-image: url("../image/mmcls-logo.png");
+    background-size: 204px 40px;
+    height: 40px;
+    width: 204px;
+}
+
+pre {
+    white-space: pre;
+}
+
+article.pytorch-article .section :not(dt) > code {
+    padding: .2em .4em;
+    background-color: #f3f4f7;
+    border-radius: 5px;
+}
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/concat.JPEG b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/concat.JPEG
new file mode 100644
index 0000000000000000000000000000000000000000..e5e69974b839849b5cd0844b06cbfc90f8380f6b
GIT binary patch
literal 45505
[45505 bytes of base85-encoded JPEG data omitted]
ze6PSSQ3RMYfp@LwR!jwtxnK(s*SubNzX3K3*Cuek%S~`JR-}ah?0E8wW+m%XHIVcVJts+k_v6Y%cPpT z>TQSeYv~rz62k)8E|mWPpdY%wgAK}&O{1ES26d?>vl?2mGn48YDT&=nCiH3L9(4V< zqkC84Y|d*^YIxk}s*@%#?Ej)yci|tp3mv3^d=-6~=6-)j9(E=EY79(?SDgvlT8pc(ORhSn7vwIQ z*rB+Uhhn-0ohWNmPh)0yyVHLHn?Ab1hqdeKXIum&i!{dCOGo1K$0h)Zdgt$ckn-+D z;xL$W_cj!mbxjs55uxAI1=biFG^n}>PYViSMKkjR@Q8kl$f#{|9I971h-CMvr=qT|^Y6qj;n#Rgoe!KMOS?(xpZOL1cXSJ5<>4J(xpocgx;km1VSL?+~s+=WBl)Zy7!%ot!uBnzWL2>&bbolV~opm zj&`Eu6`S^7j-^A>$T?4}eq~ZjO+Zr3dQ*cBe&jr$Dpo_*Gv*~Qceg*V&v?~ zS()pFsoyH_wdAQf8F(DGIm0k-{#UC%y#Z+y<;kgVBzg2_e?fxP-)HqSdrt8-{mHV+H z?!hh{_3qYHhq6A8fh^N~tjGeMTKbVl=@Fn>b~A9J6xiS8vE^6I!I?ff zZj<7ID(w^O@Df^ldgAW6-`WNv{@Tlxua{Y8=YRI83u$U4Tpn;T)G*{MLm!6EL!y6D$~>>JHul%SH2dV=9*TYAqRqW+N7K}y37Xr=^B7_>D>KIQ zR9l;m^|K$Q?MEIj5danNrS@%AIKji{X#35my|SLa>-3|>Gj6e-!)bNUk*wkM%(QEF zU}N8?j$ZSJBUydT$yGm{4HY*7=y}FI2B*h}9#`tgyHgR;n+)^1y^8(2Z9?z;EBy+URIU z)UWI7KVpcYB*GoPp-etvO_#BLs}+2Nq39H1Ooz-uJ|H=V@!a&j`!)sAZ{*It6qfT5 z;M#k&5KLT@-+(eMPXKH*A7I%kc($Q9QDMchkRa35@l*55namPgzeqIX44CSD;Jl{v z@1a*B!U*jl?8_S~$s--J34e9z5}sU_|@AU zj{!t^LI`JHqDR=!V~X~luOv%+wqS1x`TIu+JU`YdaD2jY6mkgVwjnf^FqC$VZZ*rO zdsjHC0L8kUa(w%%YVxC_ub$dWeoWB*@mWHn(FZl<*uoGMGNSO)s(+QSHT+k}se)6x z6KY?hclY=jrz6q48UrByA?xpYJppnqe5>Jg*EzabV{36 zt$(QK)H3nkLpa;Enmi+f6c)|j$2;F=VgEl|LAG=z^^C63WGKhr{tfz>xs#&>hEo6g zOW=y-C&x?mPYKe%6M!mPm^u&Kgp|R%gFNZON6>2N+OzWiJUQ$Xlmn4sDGgSv)N9Xc z@-=?ywU$1j7M@|SEhhWcL_K4Xz`2Hi~UVXxhj8y4Et&o912#D5GuxnXmlorW#CBOZ36 zwC%5Ub@DY&rqbz8n{lm9;dji@*Ge5B28!k7gN~8Viw2K8J;q%=v$#rdiyuuFaxxN< zJv>-1)K~k%FGZr)hB}A+-W3k0e@N2dC~@-ctN!kh`OD&egAXoT-RDU>hK2PY0z+k^ zD0Tz!V)v(;&6F;3SPgkR=qu~4^EoSgKAK=qYgxMdLSwCty%D+$Jl{}AR0+Fpx{3%m zH-QS$-5j@|j$a5D;1yHJcd$Kp;`kRRJ;WV%jNj?y=n7`~Gv(yUgzq+Xu5n*Hu~u^K zf8+1@wl(&@y4ZXb@)q?FR(8C#iz^0F))%IAW2NY{y(oD&m2wI>UvFTCOfTg`5@zvQ z=hH1uhCUfe%OIo5Y4XBIF&KZ-p}CR( z-Fn;U5IhSg=h_KUgx!HObI+(h{= zpt`IlYC)&>-WG+AhJaaPpV5^5{n~!Ni^qDHSm@7qpla+gNG3w-hh%4{4%3W0i2ow0 z5Z9-Axl->?Xv23DtFLl~t(4b7S(~Ltd@~0B+HEcf3pDzlUsB z-xiu`3uB!7nFK(tVq^9YPu7}0vC=piOiuIz-qL1?y{#{uAGz=uw$uAl60V)-chOWA zy{+c~83M4PfCguuz|PBWY>gQ)j&JJcuEy!|v3zkYUS~oclHXWIq{=R!9I6so(g47w z0Unp|1Tqtk;X(4#eVY%803Ug>GUix9zP5)}7#kye4x|e(qWQaS`2AziV0hlz|F`#0 z-qqnnxbF(&-4r+EWJut@hd4NN%HkO+W{GTx*m3A$hcJdZP&R$UUB4E$iiN@i($PI7Ql%w~OBevu= zA%O^4xN{bt$;Ey#i{h@iFaJ;p-d;RNz*IN>K=Lu5k4<>e#(si7OqY;i-`MSZQ8?g3~xs45F~@;qX0SzqHw z*1;)^jNRD2rGv~!Rhs{}*lW~2TsYP&HlWHR@_v_W9V?UNo(QSS7mT1KylwgSkX!id z3^k#ZqR#|cS*~wEUz@yM5(1+SI z^^+l68z2~E0);Ice^j`8TTG!x%(sJ`pIcF!+|(oS?;)5lrVec+Wi&Z_>(M_}c~g1~ zyooO2m{fn216v>V8VCUzo+jT*LI|K{zN1bdr}Okh+myaDYs9FKCC=}l$urnLvj!c) z%1I{MsESg>SwqHMjM@Vt`-H=6)Z;%kIe~!uMFD10$2OW?_FaO6#YO$ zKP5_K#`M;LlngUk-=GowAIVmSf)H5>uF<9|LE*RckQ(;0)3)s9{M8@9Cq2eu>hZgK z*+_L%<7KuqS2&>(ACo>g`L2-ZkUjakEP{b|HTFJvkXUsBvX-}t8%;j=SejejBtCw( zwe2ptF0}KYD9HiqnkcAM;1*As{6U^*yxiF#bj`wa{kA9tx7bAsBOl9SKtdcTxF3@y z;|g?Wt7R=LIZUR%`KFkO?^vucI|ep{MV|e$uOP?9XSp_ZQS*~GZ@&U#f%jVH%ZJ4i ze!byC&yeolYVWD!@E7w>(OjFazx$&JZ)3zgQk;MXo%QS3qQ7%xb$R!6o41rRon5`e z`op{J9VE9Lhs^aVSjzBpe}301oVA{MGLxSV{4->=DDI{ffyn`xp~bJ4H`6kJYR@%L zuE67|f!f<=l$TsZN0B$&RDP|s3>2s#*Vea&>atf4dZseCWg3M`Kb7cBh&*tcs7-vQ zsA&PU(>Z$P*ut&8wfh6|JEJ&3S!0{i-}q@?xu|Q_$1MjoW51OTTFF}w!Saa=Azen9py-X||C)(WBEgop`68nG(t? 
zpVZF=4GCIC7H$sp{T)HIgmX?fnPw%(%<&mrn$)5{vFN>L5~|7Red(Wp&5-k0@dX8y zQBR3B)ho)HZR&z7le2ET>%MKq-gRnqSOfj{Vg8dL-yIJuqu9StWj%?lp%%n;LH~EV zhC(ete!;Q`S%oKA^P4x?ZH1jKoxtg&m_bL8L&}5%%&h{C_UB`His0LDRa-DTlbUaB zdqjGP!_{BG$#JPM8zKJWb7p_q4KmS;9-9+DB0iwEKCfg;*gSouL_fqkC0b$5zO1#d zc0l}vJVudAEevoylXBBt|8%euzs6~oKOis-Hye7>zBuuALzRA3m65`6(Ipr{^Gi^4 zsN2+sJXXBU>o)$gwV*;a{O?cz<@NGKks-hDQCGDm1?QxPTaC?D)p5VpcV#^wrBhRL z#;?Epyw!NDdW$n4jkeaJVu4=h^ysiL|LBBvnAR*Y4>2xcB~SFE_Rm~XoqwSqL}4Rr zX1oVIEm#OWuPfDQEAwdqKmR(*l@KBPxB70;7k8hEt`qGgV<7_MqHHC00q&lEm#?bs z=jdZ<-<0^m%7eKs(8EACD8y}JTRPVHjdJp-&=7p^4FYhTwRSCOwVaDT#y`+qau(I{(QWAC9W`&M*Oiu~=E#_99* zx-;spXn}qko^!(%_dKfgPuSnyY4!0sT7=6l7+*W07il#leSW5Lzy{0nc-xkK3Y?8AO&1f% zMwX)*OM<>y*D*cuRX+J^v#I1WJWk+9DuP`qR1Ra$;L2tMhrfG^-?AdPK^HOr=~$Vm z6{s%#8;R}L=3&e<{daUEG&^KXG4ueG~ zPx+H~zqfymnFY4|{fEQwA9J_zoUvMKRBaup2kni@eAb$8zoLVXDTJ=XzDRv>D#ZVx zTbk&I_>hPRA^8@t*dP|Q*+t(X*E*w?`g3A606y{HJE#j-Q(n^<%BYGXyAnLZdRXhR zmdp8{ul4r%%&!lW-WRW}XQ+Y6{hU4=3hgIU`k^FEQ+`Gp?Yjbi`5lZxC-slTe3_4f zOhGT0|9a&5d>`drn~aZ<>tbcin-slw_biXN);St-nQRm;yBwkO3$VNZ2H5!eGXCJG z%7lYZAVTVJcc{@}}0ObZF z`O-oM7e*j1F;U6wZ#)Uh37_%K4eczyXd?x+gKjxFMJb>0J1*KiD|sw|-&5!Nci@3b zzx>uT)dC@bOOHtVNM{!~si*`Ya)3tmTG9`uy}-qH{(4^Rpug@{vhN(z~J7!yk3dpRW`z}MMMLznbL09?l!Q%R8bKH`^BHn(XZjeu3AM4I zqjx!a6{5Ukvk;CMMKQFhG9otXkLH6E;smw{4hi1T zp$aa#&Mt<>-px8OVM*IL+{$VchyzWAo9>+KgO+mpmhx@?Qe zfUna^GW=RrdHf#Iy|fj0FN;Pz3pX>Mb>Bm(!8b)nFf~F`&B8j+t{O2@$URV$tYBZS zwtdE341JJHN0?K!-MptCkBC>r7>*!51m|wo_f1+vg-7AE123k&+2an)kl%%^iRR;F ze?5KIcqU3Q&6xmv1TJFU{qXtV9-+IKW}hCh=eqJq@=1ILKbd`#Y%1G|c-L>=i^n&- zL9o*@PTrdC+Y$@B1jCCBV5OYn^xd5aVB>70h63XJ$o~>eQrM?b$Rq*8%>|o(54rE( z=zuToQ^54bO=8XF?rK-^L~$*e*2 zpRR(eF2pc6jIpH$9*!$@q1pRi?wEM~WPkXb5y{f{FZk{MyS|vYmJunvXyiUZ7Kz%0 zh1;w3Yl;1KZ=yDn8Y*+$&lhNL7p5a4JONkoo3A;UojBLW^(Y|sZq0csHw$Ast8Pg2 zwXVUG3jUX!ImXLVZUt4RNpA7)Wu zS;Gs2Dz8RxkUqnVg0oVxao&-ZR1w25!FqkMW-^d_pEiLa24oZ!Qje6K&Zszs3#bdi zTF%8NbLL5;#w^Ksyr68H2p?5@)1@o$moICbYXvI4v3^Sa%8=Q9Bv%~2!z+;olTz?^ z8rOmmJWFq{zMk^yytmElS`WRIF&YqBBpU3R%rDb?ll$A2*4P@XXKMZhl&prVG2wd< zdW!U-4y!1!ez(S)>?EZi1rN2i`7mA9c(X%(y5qhR!}0F|Xg-$C5YO&-wCH2)c_`G)jh$`+;_)Q+@~HtW79OHCT}$)LTAfudkISQ-}b$0zn(>5y$RgoKm$%5Iyjc(cqZMuo#szMuD?%v6Ei2klKvH9 z_#|$9XrtpygPw@rh&{oQCdq2S zW1?8>D`MrPG&rV#OX|HEVSS52ZN_VjxMz7t8gqCcf^NQ}Eu4-Y68;$E3)2rdPse-< zAT%l`M<;}yzbkXOplSa!mniP8RfUc@>BC;#MYqCvp7VnUtDMlo)nWX0_;0)l={4@vW{CuYaQ0%jPr>sOuH(N&rsb_;v zFCcj)nOXyLzmOth*4+6ex8MCrtuBJ=6r;|On!R$nTe@enU2bti&o_9{H$#y`T|4zz z7B-u;STxbB9#y@0b1DyB68|B3d5Mq78#7~;C~_&jP4M})r>CM*+Orp+ebBz;etlD# zie8njmNOiQ&5JD)PY;O=+koXyldU|A=H*{&om_AO^NHh^uXcwhMF(6)xe=Fyt<8HD48Q*l z%6d3yb6WrO8}7Bym?i??t7b?__2ICn7keY=!n(7G)I&c)fqP>1fY|V(!jWC>FoOXT z(G_luRzp*C#A+y*>HW%i_1{eI9fa(I{lh?VX3wV76?CWobACY?YY0U2J>k|My-Y_xQ1u{Eb*YM3Uvr(1e!u(OjICIuR~_}l z0NgTZU9!MpdltjH)C+n(>B}dv^UE{(%cslr{AxDE!dOlwsKR_cm?K(sSDLb%g`!gB z6NM4vhjHjD8k4;X^IcD!t)rA>O@e#mI_H1J!KNlAy4a<)9GoU1W&1ZmJE^1f#y|wH zY~8$I-Oi|k9k|+EN<^KlqSmi64B+TE?{y;?H6cgy@Kq>QWdoyu=m_WnEpJu4VQmV_ z308}a;q9Wqgb-)B>J|2+tah*!b5I^fkV8E+H0f)CU6ib+ z)>rSo%LIQCOI+~+Z2dVFen2%dh2>IB3G(%hGVbR$l0s&Ba@olkK8o@(mX!b@>j)+@gj7Q5v|= z1(u3v&@eOkz-#0Ej(~fmH837pLbiMBzfOH#)K_A;yM}-8{g|1PI|j5emT#9*pcs2< zeGPk9GDQrwJ%CRt{$PmSKA92G6*ooPBkT;+AE$AR_)XI-Uwrz4L@68Xns3y~5JpG# z{Hy*wR0o2@*Wk;GM+MKBK&(34ZnX(+R%qF?ig&;~;bbnY9Rslc0ZswNjIh5cWBu9w zo$!j;!-eq%b`DDG=vP5arowH=f3o)k5t=y~9kPOWH~**4FuO8XE12ps zuwKE+QyT7mzOU@t2p*f*2}CeuxI%DxyrY$AgMdq2x)a2oLD;~lfG?pKs0X4D@I1Rf z5hsR5T%a*~8?uYoB{0v9Ln*Sk%nP}*sf#}PH~ z;tx!`$3%Xmg0m*~Qow(LsQ*JP0Lw_&CxVCW{Cg-Fc*4A=S?!i*oY{+z#HHx3B8(O! 
zDt4a;9K@Yzqn5u_m}hDZpTa3^|ODRy9J2zVHrhSZM z`09<3yonXEJiG0$Q3{x<_C}BMG;dZt`x^6dIEmD3G^hcU8L=omR4C(FIXS)`xc1H^ zDuBI$lPvN2+wI_E?08S!X!GAyqa#b)zkcT8Lko6pvd_4(l6jkHU&%)84f6W09#ay- zZ48HsvYvmSHE6lNZUHebjR?UQW6w}dq|c*AU72pz6zuCuv=hR6Yyw#7mei@>L=X(W z%#+ui`C=ys*Z7tC){IuG6mt7p<7}4=Y@_3oVm~_Sa}-WM;P}49i?ECUuZPJ~OXG&y zo-RLC1b)uvG4rE__Nvy7r3U8eUD0<^Q0kb!CHfNid*R&ZeB^HpzU-XT3qhfa1dO7i z|Ca}U=dFiw2AWGV)ReuZG=+H&^H|7{B!7)mR3$bY04sXqmN@CVDF0EPK&EymI1rD( zh&D~IiWWf^TD&fAO(yj8NH@|#bjeo4@E?TR|j?dD)E z3ho`s2|{Xb1(nh3GPyb6i`Lhu3U138Iqw5OMiu*%CPn%OOc*abX&oRaU9cbZHLK2A zK?WLqM(>9zb8Y)c`yC+K$CilWFTN5#(VVx8Fpo{RrKfjNgU0QOZB#_}@=r!ZpISiRnlW!h z$fm)4FOkG0Gv$xL+2#c@+7I-3=p~Oph_z(3LNeX;pF&&z~32)^tXH z?8(NU@dU1C6~V7tgyB1 zG^t%;v>)qkbC+{o{{0eG$6+6#Dy4p@7j8P`V!0Br157@8-VBPz^`W&*K#{u5xZk-h zN6jD6&wdJroA@9r*6c40g(zi@>51bjIlVDS3&I%tiv9Naal=!v_x|n%2A*fEK7M%; zs?gxQW{YX_dSmUGH!Ji(eq+*tH(5ejyr@9x`usQ5D}p zzQL+b{UoJMN~RI8p*LoG{HASMp4Lzge>0nwmF9_5xhz^@=&Bs1{h%C-+cd{b?*m9WYYIkn1pr8*{);c0BAkcJEC@XUjhQM)W>-_QsTzGMEa$5OvsAHg`F&TO&_eMb zgRO5fpAkBj@=z{?;~g#+rX`tRk#Z1=QwjuzZ1WiKzW4%JL$?o5N)%ODeQx~@Ns!+X zMHe%jY3eNbjYQ+X7?3|9Rh5N>ud6Cz)Nc_u*aKQpWJ0UvnLe}dTQdV&{~ijKWbRO< zch_hS=~$OJO|^mZflHDJOO|uWdGO3Nt;-nGTB;ZjOCV{4I{>*t_$7#$<^4JJ`x}ef zi_z6(g;J^FS22c7s!;~!X@Wk}?Bn|)-%DFLM`A=a3#jt!Q(oi)4|&?B3W`J`(EWaQ zMXi{C(Ws_q9N_^|U}2h$9B@S~W|{AUCgW%_h{zDnr@OTA@}lbER_C}>{ylWXGAYqu z*O52$4C)9un#C0lvZ`d|_aNXkR+>YYVkmf7w15|oB%H>pl%e%x=2A9@{vwoJc_uYu zmF*?pTU^gtK^S3+IQo&t7@>+egF5FE3Rf@`MRo=_2%U$Y}X9NM;{xI=) zA=84GF*t3A_L7AR@0$>B$d%Cw{KK2z?>Dy~{uk^$v;(RPb_KpVa#3U>&S2eI zy1#26&w>wl@N832BbGf+p7&QNjIva&woG%0+?|aXhqhTK;ND&C->K`nA&Rc=`r*@) z?=>Qq4uTr~#14}-*(%U8>6E+X$?U6B&|uso6zE;`5t?%3yvpm^B^$#2c>IH!^OJy;8s5PL*O2h07-~u1A($qUps6`-W ziSH~rJe!k;o75L0?GU+jz4(9<)Z+!ZvbYP+j~^4q9rR5SQ!U=)dAb;xztG367EkJm z4UCO9YG76g!S@M~@PZSq!(Y7{m-fA(!gy!P+QjA{YTpF4;7t`@h zf3X8d5b8Vg{f5ggH1y1)Qj}!m(kr5GIoE@<-@w>FZ_8m6`R<8upr1T?hmI&n>57?v z7RJa~K~_1x6Mw!aZKl27>7si|f{}{`PNf@H0^sRenxF>&oS50qH<~E*sBtX|)M74E z9|ymG3VJ=;YYCQo6XASIv7dF^SexhI)$-d5#{WBtM1a#M6tWZ#q^kY^Uk)E!`+o{p zpD)2SP4|({RvJMNukP}`NUEQ)c1YgFTQ!b4me!o+U4xqIlQus~Nxcn@33^TroBW6A zrq8}{4FQG4)#VMlxdISMu+_Xp;sC3QeT0it@=)G9pf;=Qot?DSAvsTuid5HBD(J{L zgsWz{nxEFO>p2TkgS=Mc8dskOWx#7o0~*M9LSHP$LUkfc(A(2O&Ne-54$Y!Gch2`y5NGr1+T$@fYpp#HO5ALb2XvDiz2mrIj74>p{i zy;g(rTRr_)m~ZOjQJ(c$1@RRhC;d6|6L+w>ttlwvm#iL~ z9la#&`xK$Nh`u59^sz=cciW&=YhzFCAX5By<%48Gn5Wlbe0RTWk!SfJhmF-Et9rv5 zRKb8W*(&b9!fc1n6FS*XCjg8No9%neJem(Do_{8|74Z*>*M5wZ1_yU8JyKQfpUS-L zajN~F*ZfuP>b!a{&7N62>9wz>o*c&Nn|;s7yynwZD9W?>yCz#)^6K{~lYXVE*;jK0 zKi4yl+G^g{ke>e>pOEqOakc?`EVmQaxo>W<@HJn4cF@pDVed`-#f(vjw}HXrE~62d zU5*Xa-i#@}YNs^P(luFZzlr6mIYd(Oe0My@Ei`{;J$fL0aTj*RW)kj^9sq_`{eHm> zSEk7J)~+w=Hb!kGr@+{5{n0X7)p`6%*K#|fdU)=$2SeQN;Tq=R0N22?mTD7TOQgPT`uoqR_w&<$nQ|cz2l04X7 zM|YW7LkX;;3E5n{!nhk_8Dr?7YgnqkT>5qV!}sLf%hg8ZS?}S&4VSsp3h%vHx)&2i zC&FjL2RBGYM_gar;Z3TcE7N zwlNm{k1t(Jc{C{Z_zKY&;_O>FY10|LYrsm1nkzT*2-?1nwmJzpa!`)%eKbDuS|$IpQaKaM_$XzC@K7w_99JiBv^S|OH^ zoQf0MaOUq&7l+sQ6xqY8DKl&HqKU=@yvtp#5<|0f4@2@p`faEAYG);XX7X=Oncp%@ z&K{gFT=>RY9X=)De#}H&CFmf6CO(XH7;xzqOR`q}Ux8qi>BRhyWeW|dK{*pE+{CHv z^!`*E<<_QpJ&j)jj@d(gk(~LBbDyC*yNw`aw*%>ZG8?y_FG9IbD+4 zQoKX{N^Y}+W}SJ4E3m&UST>53w>Pg%A7Yx-Ra810EzFm=r^&ykv=^QolxS>4k1_K4 ztd=Ud~5Jc?7Bh&N#cS)yF^l;j4 zD}5c-*pU5UGYoASesF59c-PIuH~X5~h0KlTJ=#{StIS$9K6mx_de^)X>s$SKSBkSn z$K|@z{zuVf;g?w_M0a`Crfw8h2nJM0FCBf5Kuu(FOSil@m|ib%%|6Mm?~-SCSIk6C zK?IUumBgW!xuptt)tX$7MJ1FT=ay-*9kQ4GfbLue&J1 zyx==Kbm@A_ia>~KI^CXsX?T}AW6oFj-Wkiqt#$ayw)HmpaI9ubYyF)j-Q%eS zW;^B=r?FU6jcBF0eLt)oo}{?FodYl;Wm7c0U??6g*3-L}G6VMCT{$C*N> zHCQrmWS(2V}{0+TaAsa>(4L6FSD?bpq9OACG 
zkl5KC-t7$?hAc?OrtK~*(yje&^>N-RfMLM#*Ae@k=H09>FW)1QKLo<-D+AL1Bdy<~ z$Ue%LLObM`Jx}sTUw1hOvWGp56FG2YWr8%hY@{5h{Vf>xc-lDmvswUamJWM*AO8UG zRBQ>gDB!PCEbT!hLz3_UzUC$S9q`$%uDr)(72$taYb!?QZ#le(A#YkdRI|lZ*xm_m zYhH%>emA${QnumzxgE-=z>9&nk2vOrd`F~L zTCFSdYHz7$yndRjoL2v`Hnl^Hu)6j;hAz<_wWXm-*&e)I3$WHh&`cpcLT2SR7k~m4 zXfKs(ac}S{DL;@J{Y>-h9tH!*9TjT5ubVbANXgFhgz#<$b63X$b-Fp?G+v~f>JY;+ z4`~44t%wc<{AuEP1ftNom7S2e@6UGkMp6M9M`QNSG$hiXlx3zQ=F|?>u_LCtx;Q%U z$sc=JCc~00+JfO@|3%0A2(n8hVDg_4uUoTnL9m7xV!PUD<%t{vlka$~OXSVxgtSB| zB<-S5+ksaEN3#s^Jb}5pT~Cm!@FRQxDSvO`p^9lRmQP^-UN|7$Y)G!0zg+0uR^etP z^g=9htGg(r8B_X4mNKri@#Gd{g+!89FgVEn@=;vvl(`rZDy$jkdmC%2P;WCodDtut zQXGic9m%J2-KJPZF&*GJbo<`9gS8D^z79Itn>KOmgzXa5QLgw-4Cd)|Epo_72 zUmjymsH5zlq+WozITBk0aV_#3V;u-rAk=X~ljLu%1CkuPwFHe4I4@q8l+J32MfJfZ z0g$(ou4knnwLpk)C|w0HYiHW162EHwmKopW>%lHE+IJB~{Dzgsyxa+OnML5D2>zL? zT9!$&AOCc-f4bb}4?hN$m($hR61$}xlA+72?*flJaIsRZB9m6A`?W2> z0756i-R854qSClVq#h0!v@K$>UoB&smvtopb*lNK;6>@Gr6u01A30|<$p)HoMK1v( z*_Kas_PPr;z%^$HcU@R{K>%|G7t~Flv7v@^n${~hwfW_DRPO0u3yVhsU!dGs%L#za zDpGs+*)X-^`mdg9}wvAZc3Ep1f{TCQcg@a5J}5j10QE7XzY)u z4z6!OKn(Kfion8o>O7kiea&|X)h%R$NZT^w|OxHU^o?ql+@`2MEojOej8d)e@^>1n0HMifd`Nvu~pfSj#vU z`E=-cgN=%-c{*#lU5Z7x28G(DtusS4A-h$RR@@#9^IgP)iFm9{j+SRBz z^9lMkgtWajSGArukM*VbP&SsbXF0I;t6dfq!3^o+m60b)x31aT_7;9em2JBJ3Kj7E z{mWZ12S;&WyVF%OXOwyiT*>3}ZAoq~cuaQQE{mU z8*cF?-b)1O55r>FPix|AiKXRt1uXgAEf#p{j4zhknlqDKH|lZHs=6s?6B8zdoP#5G zHL-0c>jUNgp@pJ#y*ZR$CHKp*0H(AHkw%JBaWrKq} z*eGZoo|@xknOK5t8F@BnW$5e=&u!)@KYP+zAd5TR3x45oW`TkEz1N&E;_a{AA%rd} zZEim1*2{cyB>tInn(s(fyk)SRY9TQA&f}NlUdG1$QVF70(<4$xGV=1xKARV1Zt#!{ zRM_J4!EWhYyjzxrYc1cuOm2;V-5=C-OLraE?%}Jsa zByqUK+xD)<+T;ws<_H*055JCWxMGrZ{l@xZv!s)kyfd4>8=V&`a=!EC><GZCG3V z?;T1FIGR6tek+Qp(hx{ztM=abV(>-R;IrA>JVL-u8~-%k2L(1*4r@4H{^;xM@T>$0 zqI}cDep!gaJ<+cP(4KGE``?wm1$q#wk`C3c|n}G z^;e_7!HD+9+UySkj>5A4*0m_|HKn-Ou3|ags|!2*HF&HLNXaV)PM$5O-5zN#kgxFTwf(gSw;cAw zKWXY3Yn+@~p2U!KJ*)_`M=6kq5q(kRiE%)QGu$l3wzzgSH5yFvSc%fJrk4U7oQA>ldTwzg?`>gtB zJ=KpGqZ~b49%Lq;?dhEW-`|4HdocVD$!cG}*=yVZXrsnXISSCZ&zY)%__u#o;EhF_dseLNe92iUmC|18v*Qmgn*CYBK1l6x5Njh}M3M54UHUZ)`-4@c7?fFi(snkMawFnxb54v0=u*VHOt+0ZTJYyTs!t7mgp0ObUozg=6mk|IxlPV_L4#PoBPAq)fC515<_f3E#Kn zHa4?fugWuA?O<=WW)k;q7!ta1zZ}?GBDb6Ldw=K6ZK<#OHrvDDvjGSUKd>!{jER2O z;BLM4AD+LzHWcE_m*4`~LWgEPoJL9b@WJ3ge)NP0EiP>T(P7OVke!>ECb=)$f=w$y6~ ze{NWwV4(ye^CB9x9i~?7a(~T0W)0~8jEfR8%Bb>1;#4_8@RGhHOA-4F&vQ4s{Rolf z-40>-;Bpa|u;+9isEy{aZ7NG)1X64Z#)h)eMIYEF=P6g#8ht|aD9cfPkkjpq8X1{- zeUz|2Uo04wfWiZEd6{wb0p{(F@LOhkDEWG_q_EsY`!joMBg-9^_~@aWg{UWqdzc0m z5$v7XHLkzLTD^`;g`|a`w)?>Uod_w72tz%Y4o^&zD5F#6FJ9U*ud0aS;#dVnw>U$& zZGBl_rehG~jCN7my+#Mi_L7D4X+sJ+PlI^iQL8UjifaE1bBSTIMy%2V(IThv!Pkk- znw9E1XE~OgZhD)QCP6_bva+*RsxZ#c*Tm4v>v$*cc9Fy-eK5RYc8;;K|6Kb1hVU0t z5KbmLKDA=O2Wgg8~v zm~NonKv1Q7Lu`1n5xISVfa8{hEN2ncq-DXr}0Kj%E?2N`-ui~A^QFCLfEZB^#ffVC^;_cgQ^@YGAY$I*g4GVRlca1Q8YnM4yJ;~;{xlF>(w&&{uIlf2~bVgsZ`K?Dp% zK!iX-wj!d`4G0ksqGBjgB28K(DkV}w4}>0iPk|6f@t^E-b)NG*&$;+7auY~dD|5{? 
z-tmrij0)HPeBU^|S{ORDF=AaLQ~dc;@v%NDVA2Rt)Zw>D>Mt4@<%Kb2qXMXZ*@J-jZ{bmhBZ->dMo-GOEvKs4 z`>00$jWN~XIv6GO-cdMir$rgO=idUCnv~dPmslNaA~9?m0dfnml3{bn~s|Oyj0QDFjRK^YgO6T>HYQxU09q*O$F-{iZZ=1j1(94cHf zme2`$X9*qXuz_P!az9o^z-4&`uav=lre1*g?BJMhmlWP;-u(2$3Cp)jQU#CyYzWn5 zPQ7s^J?k)}LOdIvOCsfXYjp+M^%9Gc{%i;|93!n^IOK7@Yei43sWTz6cjPq}D%cms zI6e|OU}v26xZ^ePed-}y;~pOz^qF|7u&Dc7|I#_!^Gn%#-%e~W$oLW733Bu@uvant zjywD!d46i{+13n8UD>T-@B-{b&MzXtM7<%fack%!;*q;6P#ogA%{0mOB{wMPT-ieC zMZ+kBh9r*s<*Da0nE5xSkYn~%&YWJH;{S@$*5vH~3iv3kc|8bH< zG_*G~h#WwySR+cz?aZ0wayzcequ&pty#sY#fYXj{t$YE9;oK3_jkwq{fJiVbNTv^q}j&$doJCCFSGmATAmq)ByQCpKhxxBfzeTYK%S0Pfc zt2$Kw+WTnyZl*nSMTXe;hW^*DPMKTPcNH4QpdmimE8*c)7;2>E?BX)!uK_yk=M{k z`~Zo#?y>15MQj)B?0(?qBqFxIzaZ0%k-sGPAuT?04whh=OLb<=eymO1@X>NSlu z#H;-t`X}qkETMi1F=wr>L&5ogudWQB10u(@DJWmrdttGt>~QC|_!Y?O)f7_sTTJ6| z!1R?((Fxr&k@EAlljufKkLa`dc!6o0)+fLMz9?_lJBx6OYQZg@LHRjsx-#%!?|DBF zGN$9rLNf+Nw<*B@v4Y_7!21MbX@&i~@2Y1Mn_}f3Xf~b2`&esAW~L7jTF#8vVQ_0? zHnlmS65oG>d!w7=#@J!#GOoFOoUg8j4VbD2xD3B{=5R7@R5KJS=?*&Z{H+Mui`N9g=fd@VeJs}BWatS`43g$c(?>Pf8%Ics3BWyM z+E9~^6}5zF^(bD9YM{$0a+*%1xP$@CiWU6T$XEOieE9GY{H=DhSN5EZU+D6JoyBi4 zysGFDF`+?YS^DPX{$|s%{DR@>k()_}+9$b@!{tVKlnh-a+DVGpkQR{d*Hqn{94MG%3Z z=tuXtp-V>AZ4h44;DE`bFazM*T@%$3e> zb^_fP%B@?IzL+#HfBP8wX!J`NO7&a;$GD1DrWGdh0kem=;lQe&G69;VP)C1}94l!m z7nN;+%^JMAwokOy6xG58`(_51+pALqH++VP=)<%--8&ihXy@UFVf?B*W*&Enmz&A^ zs7Jaisc%U`+waQCfM}s6a-c^F>P??&;Fypk(wvCvqxp-8d6Jobkpv0)xfW?>01 z)f7-xQ9=03*|Kgyf$wIKmVj8bngMl~&w>Coin&6s<-(Msnv#Ist26}scMd%O5scs}TgpTzV1L3w2(xUDWSOVS$Vn9LCYEZ( zHxTWiq_)9%>T5sOONELpu@{NhPZU`}$iERP+2JwQdLpU=fciH%5m#!r3e64%1{`%O z|4aXle`m;RvcY%N7g#GwlD#CPb~_GfJk2=UQCNzTd~CRS(|_m2l=#JE$TxJ6J$8@r z(MO1#A|)P{k^B#wj@B)p&-toHFSAu_cVP~y-Ck%Y`*6G8I+}l0{WATw?-KT6>E8G- zt5Qc7OO|Au2XSW2+=#GR8Yoku3R4Q$op1E&Xiq9%dphnrm3P5lv<|z2Jo;X?=0Z`7 zW|ENTT}#uLuqw= zr)xqY1qQE~=e|^~x+55CWF~~C3z<(k+0al!#Ae&O?+FR~e8>>z8ZEZ-$J$EaA;Zgi zY6`=A<5i+Bz@!dRb)2T_f|n|DO+5lU_M7gYPY4^g-Q!W}f}dsv9O^j!I^DAWrqs!h zaaN&&!mdxsA6GexeH_-os=kP<{KZ0Ok#|6ndB}8eJQnqbgPfA_SutZ_UZ+Twmsm@e zt=l7UmqqSRN?N<#9=}#W_wA#-tj%vG!X)FjC6R1Yr0x7L2K>YH4w9lCY5yZy!u4O@ z)p5{p{Zfzn53{W;L8>=OLE+Ba)q-oP-l-z-{@-GXB5lDXo@;>XI6`cYKBpj0J9d7l zeAlj@e3atS7v;n2D8ns`UkQunzF0R*tvI~;W7?T9RFxhP!_qit9B{Dfab|bG3y{$HKaL);Ia6J zw;J?s-13$gD4Mnltc4sHbVS+6{c1XxlK6BydTqwHw@R63&F#JRa7f~M-vdj^aLo{C zAwTqaK5WFah>I3UJqnJ=%Hj<4%{lGqA?$hDb&b%%g0emgP(B#1PBT1F5b_b z>9SXN5HxX6Mc<9H6=M>LNgprbnGLqVC{^bhXD$&`#J2@>HD~hU5=man4(5v6WUo zR?xmSoycA%%UyX}GFInY-`T$%RlE8zv8Yg0<-7BvG7H;PV@uv`W@dx21P|Qc)uCED z3{5;chreG7*wVc;PA9)lKMrF&KX7;iKXrUL&{Otiv|^oHq#&t+rAyP;G`K^=*tsBTIu(=X@P1NeIHlmUxrGA9>X4l3cmgNzHasx|b z`R$pY;|IOvxD~-o9XZ1O0;|>X4>ar#ACmUxVZlMZ*C)SHYn(xrGV^A&NP1ly6S%uX zYVhx|i}ylCvEkUu&;tTZW;3^4m{F)v+G;txs-tQ2;;dYNx-Y;&;h|z^U!Effc!tW9 zjYKFMk8tgc4|Q$`a%W$PUyS5y?X6QUytA(qcX^Uz(|qba%eSF?sZmyRU{2F5xAC02 zmE=K}55L9qh+a&vN%wwHzM>jTOA)oux8w63*_G0x#TUR__nGL}y|3zn#a})dAHz70 zo9kxyU(|o)ieOxSj3orvP;Msc2VqO|MI82^!YN1n4C13a&8Qdx3^<(70J9}HTvJD; zgsc98hQwz?h)(pZ>t$Z&T(OP4x@C$CIMLDG3d6LKI8$$Jz*v$+lk?dsU2-_$M4RAt zXst!c#t5K+=?XXgrGdwm1#-E>Ru{fckd4LmCmiGb{p7dM9*`q!dW$%yE&v8fVM$Ni z0U-5;N?2%6@w_{|Hz^0fSW#>N;!J(7?PZ_!%=Nbn`CNQPhoeXFAlAb?9|iv1kVG#> z=J!XvT3|efDFHsI8M3rEuWR19QY-h;{8wiXxJy|94YsS#&?5!uvi4EFP5)@O3$9t% z6M83W@}|MyR5U>=I|WSov0mmf8g&-6+!s}W^_mQQKnNUSJqr*LqAqM{`AY%JIF1N7 zMmL=DO$t{Y5cLat2LPF5C;<7n>Gt}q?>P&FWbEV+D&Bzt+Pv$tc4Wl>_|_qGP?!zV zWWs+vRWhBsZb%%W05-_a6!C!>d!o$kLbu)1b)`(HRtrSsgrc-FYwFv$`SM|2GXW&&paco6ASCRz&QlVZ78s3|i!Nfhn{h&<&AA;1-K#FMPag7jXz z+;N`~Avnb4rMZY#0-v!85;SwRcz!Q6P`-eK1GnxeQ{|^D3gDzxsLhEYkxc>qNAn-u zEPCI3nT{(px*b^M&3_<~| 
z*CA3FygR>d4l(OwgOg2@HbwG}y6Y22{^sKnamRsSn|J6BK3Q9YYE%h26l$Q=oWq6$ zd|PqbTA8&J&spsuCaEUw6;6QQmn=tf6eLBbV6x7iA}U`4)Z_?37+>lSerBZ?8EzZo zArlc%lzJ10`NQ51*{+{ARwq4Q8iouv`fuupH8x`!1tr8whkES;6tLeb8cHHfsb}t5 zXe3xnh?ZVIIrK46c&g<}!neGOJl#SbsBId1(RyzA^hU_CH+GUh3K&5S?_(;y5YU6UzcvBDFj#gOPr&4+U@z-L7;2(UsS4!MU{V1wx#+ZqnM@ds+j@e)FfyQK4AVrx zjjA|B0Vg3(J~gK!I=2=1#YE4@b|P4+7XSLNWb8&#!8A_J@3+`zz+n>dXK1M}{A)kk z4q~cc=tgfeaW!e)ZdCxCFRd(8FaT_$7a0)VPYTWJ{4E0XF?km3X*Q|Z9>4AZzSN@K zoilx}FAtlE$P*6hc1V5XN^QkUiQS^yymeGw^w#w5R^_A6) zaz4X>2JjcM8-~__2-k=QY!vW*-ju4-8my~aMG;#&XTOHJetYrQ9I`1XPvn`~rkTZJ zoO|W*RqV+);Ll96A8U2HySMRNs$qu71kjEP9fRJIjI3T;9c>G^g?xzMA*AttPjE`f zngU=p7l{}~1t1%olsZ-yFE8S_ez2`z-B5ucbC+3dRirz~IMJ@3!p+qp2W-wGfX2)n z@3Q&kJnTY9Zf2|jcJ=EBr|r$G83ExwG?Js%YBnh!bXl{tn~uP3meTW7-g!0>7>g7E zGxh?lu>@TX+GNrcB8;kvK@Ys28J13l7B!!H420|PsAAAIVESame7XxbRM(onnmte& zf}p6i=g^I3kD&9XI=|E6%A$2ZQfY6RcRmI_y8)(Kij4mA?{v%m?H>34&$Z?hf)^(l zec605c>Kvt6zxQ^>_sw^1(VT%w6qe<<%q>k?LOsl#c8Hz>hb>K|BNuzH};YB$fIFc zXddfJMpJ7!V z_q@Mh)CB8nc{cmcSwy3(-~eRo4!Kx&agh}@s+F?Keqd%;vs(1)>)o%V*0Nh2ZIihC zbIj;Cvvp&w<{@u0rqgQbH{ulec58jp>BvOGPi2S#cDW|`fk*m`~!e(j- zk3Gg-XOcX;a1Ye$uY0sBkgUKY#@reixzk@U<)m}{n-$f{jkSW_9-L+J(y4ismjlPf ze8rCxasKe_tqIg3EM%)&hH{+j-nz!B=cfg0?>A#>LV9`Ee!SUI-5$9D*^b=9kbf$# z@owQCRII`Dx5qX%+hzw3c5va!VG6>-g+}y8%nwB2uAr6*vkb(XKA6{Z764BBf!(^4GF`cdtdeTwlB2 zaBw<@7?Yg{h@h0lM!#$tyoGN_-1Dr(|IwY(WqN~2^u_tU4Ywckuf#0Pn(RIGEtgN) zT~j%h=&|)L(@SR@m!nCJV`MprEj|PXySFZv3uj&fT=z0z*CeA)lx zvr(Wmjb%Kg=h9rXBz<(nQO78X3-77f8$@Ib3VD*U zM=tm608?*O8tiV0qCaQO1X)rYQ&H2E-M?hGV?l9>s0aea$d?6Yh{uIWH#67ldPGAW z9|zYP_vfBnIvNHn1LMQO+9N5fh=_avj7h8Q8mRXPDBk|H;#tvD=JsDdR@G7RfS+Zb z4qA{oLGW~vO@Td1oQaLTQndG{y7a9_Yfr1z-$e-aXO0d{*N(j0pvLMzWk!yiimJR{ zUYc-wa$tetGX^~m?>;PO0M%+%ODo?2Np#DjT-dAC@1hvwa>4l1L-Lh~*P*I{Q?dNo zjKe+B=Nvtnoook>wP;3r-yTp>OhQNT%|yS}fal+W4)LJQKXEoiZ%b{QEk=;hvEmLZ z2Z_BE2Cp99jkZwl%1J9ruUX2H^Nl3DWe%S1P8xi=L?ZI=hM~MvPs1uD1_%lPeK3=9HJb3Q$MQKtSGZh1U%0gWZZNY%*%E{mB| z#zpU?jw9f=_g=qdY114+|4zrtf*@=M2(LzB|S-p5G+|6i1lT4qf1) zr8li0`)dUV7s^7<^K|R}DJVX8T>ZxnW!gfZ;PCiktpLTi+oy83p*7b`FN0xgG_7+I zi8~iZWxJKwQ6;A0dV%3*O$+DJzpimo*wIPEfjK+b#n#bzgB>+6W9fbpxf%qog0zkb zD;_|O;%C#e`2jq$LuerzeWHYtbe_8YYOaT4X!nsFft`w*az&)|&xD4UL;8J`Q|;B1 zGRD`yz>`%BuwL{;xQe=8LRaD-t%*&-$&2s%oJ<(vslUZ;#hNBS){&YvF|v=JEf12%0jJ=7_;%bQk2X+4>Cb~e~J%*zn!5=Tu0y3kFXNff`2JkIzGW4w-izu^;kpRJm>mg-2|AfJV9 zcpzMUCaHDQne71uLZkd0x0V9uwdm4s+d&3J2=tT?0!%vA$t2FHpN6LQbp7SoL{ObDj|gQe4(e-0Bet`Zxk(LvdUPvH`WX>@18 zp^i7o-0qpj7VMJuOwSubPIeXnS-(UMH3(pU)zrtrMFl{%uP6IjO#YY*<WSSFHXN&Qtkp79hF)D9p;lcoi*x*EP{F2D)I&;GQ#LmBkIqM z7F{k2Xi=Q(s~csPT|nq|@6#fdO|9K-T(Bp-p5&qI z0<2Kz^O0q&z-UOpFy{`faIZ2>WQeA_zQpMW{(0ndl7IN3s8G$YZyYpmX49WMtvSma z#`!3pnc)H^YcjvcWXed)h#rI>jAKzvBVk~`Rj?!%ARe+`4%MP1t1bIAw8n@^R{reC)H&7TFXJh6SY7cLyvTgFeU1|=v({$t4_hEfZCtm@kHzO>ER{p~4PMrE2 z`J9K@E*+)N=~Y!wr_&U2{;TB%8!tF8%{|B~%kIoQFzzS-BmSqbM zKqC;m^W@)R6Y9|a=b3eC4g*B`WR&;1W6!X{rMXza1q!zmba5h>^S{Nm(*QFOZu;Mz z>-NR|s+~Uvearh9$%l&MT=;g_j>|j%^}~?A#YC4f|I1Tz7swO$S5p;RP^N1A8R-32xFS4hXf@s9Hqvix!|0TC+ zOiLq6L?a6aJH9;e;jtIKEB++kE}iN=0)JNKK4zY|EA36WUq}AiBhQn?lG+|$6PFJU zv31n%&TW!b^74*@9#QvCedE}e#{`!5l|r1Rp(5Wd=rWhkaqqOEe2^($U-;AY0jX<( zewY7N%L_LvRjx=5V3iAGD59OY;}D)s6cv>wkYH|S|?z32$b!7d{{Cn!V2p!Xkj zcYVn?%Vk=w-<$dFd9>7QrxQp-)V%(1`;9E&;$i+OLdUi; z@s25#4vhOV99@pOWec9?`Pbr^kH9D{@){`tiH}j-cHKLBi{Zt*eLFX1T_SnAO{Jzz zquaA4IwmL*wwOeA#Ldd{yAO@BGn4E+RZIT-mBsthrZlnM`YTOh|4-Fhv#O>yP>Nl2 z^4%IW?2l}GIXo%~J{A`_BJM@Fa8C70M ze%H_taFzaeDUq1s=5lJ89ej_NL)3%QW$QOA%OnY7ddAh*)*OdP81xNY`+BfnwqWKBrR>~=xv+Z z~b%XfU1@5@H5;*-6jg*VW7=9b%R$wJ`33+a5@eKLScf= z4q{WjcA 
zmo%(vLoQN+>v;kEhXP4HCh7-6`{BK2Lv5vF#zUv(@R5qlM}Y!q2ty8*4FSmX*-hI| z&tCX2Fm9^y6`jZ!EKj>ywCmBDg2|P+uDuhMZF$x_q?|ajnF}`h9yDC!-Pb~omiB}XIQAQJCecJSaUUbfg;sIU%}KT*P?RR55D$?rs)jr_Km6Zd~iAd%gVIE`0dajV%?$7ux?+sUfvg! zZ6}7oGH$lQKVW}+8e!TXB#%X073S1P*k6I2IEl6|_#1X`TBIUJ`-9nV?(x|VFVmt4 zJbbK! zqcs9gZr7$1q6#43pqbv_dJh?GOFWT224IBhs7WU^%vUm#Vf65Bs# za>D*sOtpoZviWnO;DVylsikE_@c9J@=3U6^&&f4sgancJ;6TCm_9tR1<~QD3Kd#V_;yw-LbdpV5{WR!FX3{yg-ee^akRC|v z52#v0cczW3T8D+|pun9;Z18&cvwI*rLMtQ0+O^_UeX_Wr9LNsEAh~?eo*le*_`MN} z5(mp3gedZ^2pV_W|A`azFD@37((3R`59(sGCQ0E0(}&{og6niVa^+l}aY1fbDKmrc z)`MRjTk9K#0%d;M{(K$2QqJM9I*|#UvR0S0|7VQL#j@4kpb6n%h{+X z_op(t$iJ&==H2AnRt(GcSl$Tt=<>z8bzM60&PH=7G#n`L783py^6_qh;y{nGQ&=HT z(fJj3k7tz~%7*iI&Gqn!h!+T^KTUu)1wZ#f((K5_lb*ck3xu*H1ME4h_F>M%Cn{Al zc>XTmwJ>cO6&R>k9ko`2SenoJlI+8~3TmeSi|u;Ddgx!|=}((-1p=VC0LcFtvvY)h zi#7#i&3g@#&*~*kof?@r@7IU*4DxH^2o*)aAULh6jinIfmOtxTO9aDpY?CPvj@Hbc zZ`X1ZQIQ-)-o|SF$dTV-lx$(~ztr3nd9$avht9RHV`KDxy5m!GOvuKMP<-lsh03C%weiU4GGtr?6^upQ>Z{6@VeTRnZRKW#vG?S%!9OAe?|s zmr+I0g=V!P!==1g_p8jJsLxqTNiogDy{#b7BH6t|3G@!UZqEjC%-xjrvCGPdAbgG7 zYi%744h@_FhdlGKHNi{G*{ko4FU3EuvVK_q!tPBKnn|Alj=uP{gl3hqG~0|`Xwi@b zP;$sR3KT1O*qn8!s=yK(5x4H<FIXMp;xu%T=j+WR>Is( zDt~0i1cp4&*(e7Mje=J~gnl`oTwa%?V`+9)2{+rP{E#m8SIIQektu=Cq}ZVb>+8 zw))h;Zt{Eq>dlA4L`SL`=hUOXeX~!4+mci3%H!#cZv4gv2{oWN3B#bbX@Q?;TPP30 zFuHWNW9b;}Q!E|S9e})$H(kT8@2in#Mx7~5)WG(b4%cjeTgt9M2SuJb<@*gcf6+6B qRz7xSTTbs2N2|zi0)aLaGlTYBg%UOZ3Gi$6k4+NbfBBI5-TyyBS!l=r literal 0 HcmV?d00001 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/original.JPEG b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/original.JPEG new file mode 100644 index 0000000000000000000000000000000000000000..fa23a029ce8fddc3efa4405d354a612981d8308b GIT binary patch literal 9414 zcmbVxXH-)`*KR0+g(6i{Bq~h^MFpf25djea=~9CR2@r}j2@nz$0qN2eq)RW6-a)$5 zC@pjdy-Gq25R!}UyViIA+#mPeeb(%C&N}y2oMT}4Ph-4ro`LZKBLlFCcfFrLAqP=?gP+ zsO@Vzn7xCeho{$DZ-kF;z{kL#;7=i;F|l#+35j36CS_)Q&(6us%P%M^uc)l5uBokS zZENr7?E2N+GdMInGKw1;pTN&AEG{jttgfvScXs#o4-QF3$NzZI0O&*Wjd9j`G zqW!l9jQ@Di(0ZRWIyU-qx9%~ptLihpbmO>v|NVK+N6{IjEf@Hu3@G05ITqQlDtVO>k9&Cl6EZt{tm93zXZ( z^aD9a`6xNm6iS)oM`Z1sk>+k5yS9%Fo^6{Ev5+pU8QKfke!0sZbK&uiSSS4~J&~xd zHBZjB5Ss%aG+;;82`C;MiV{Ogkpl9j)$ocICn=gbF|(G@N!Jzm{iIGyu4{?iJngA9 zs;Lc`{#;PV{czUw!pl3gkKVXoYc0FhselkFz)_sLuWC7rDATqMsay#joy)NIJ;tp> zb-0e97ao+(>s_#KE)VX&&)3hguR>T;4_gP})xsu&mLypu#_=^!bL`ulax;k98sVoRxYv(k-Hhs3~8&bJ{Aex&?#-I1=?4Rdj9&G z3cmlV^Us%bv9j;ceWur|f~4rU$g+ole^HV4X_X8i1=yv|R~utEH#`*(KH-^u8A{$D z9O5bQ^Rt%`9PQTNu=$aoR3$m5Lx*d@pUkhHYHzWBp#py6H^7MzgEkSNs>>Bf;5_gX zp9m2ZKQ|4YXUKMbP@J#&nrUao>PfV8nJ9zpd)xSTVi#Wgnc6dkzVk&@exL%R;-0ji=-7DZfL7_HKLFRD&TT4+a!xLzLndF+(b=X zwgi!&SgB9|mrZi8w~d@(6`H2>l{vZbYg~OF{j=Y5B6zcrxN`7_G?|Wo&JPqS_hUkp z6qPAo<$sKbC3r@{0!NA~wJj{ZOxuR<5b|EXwzV_q_r#0N-FuOzZ#3clc6QMi`{xpH z0`oYINg9&kFi9Dz;}5r!9B7glgx#W9t}5Ohzmkv1rvgk?*|8YxmQ3dRQOSYK*n{Lh zfQ>6F5J8oav84#r2S3L6rO}0P^!^a=mUc#``!jHg=9Mjr3@NB5I0YGu&MOzWnbfD_ zko9?gLsv;gbx%ZNWy;reOFi?c1`{O3*#h-vLaEf2r_LAGrXU;g^+}tlD}KuZ8%}$w z&L!vak-Mb_Ti`+k*cXdvpuLVptyZ=`S?V#*ASno@RlDFfMb1yV`WR_z@iF-8`Z5!I)xw*NLu2(-mKu6^@eIuX%_F+}c>P?AVN=&s_q?I~d+-%}zJh9Os5q zqJl6fzbRg;K;~wL=_>}&k9ca8BTxv5BI36>obenugv0(m2qf3C2};5+8+uMYVI9HJ zBZ4HkCQdV_!kw|4rlF|rX72CJzXPQEDY;GdnBb1rE-BuejEmSXvJWvBFT17YLtJS| zWv(0vo{(^J&MVY%K*K#84{+Q&pB5t@#;A#vR&{C8gqRr6F*Vbnre?3AxJcHn=MfeM z&IFK}vup?&e>LG?*6=G?o~8EX!6lNoN1k&u@*S;~XL(Oh6SxEJ&L(O*Lw1cx9 zi4S%yv=6T4q_uT3ZQTjhOY#d5h~SY;ST>2D=A<3(<%I;6qkeGJ%D7n*!hC7}?Bs1VgrCW1b53u@_9#N#RLrD*= 
zTqXBl-dnKS44T$AyCiZX%f?-M;*4~`sa)=IqylL7PPRYcW@L#~mHzYfr$UddxW&9XnV3{nq_>+{eD)91cctcS zBb(1|rI^(1y3kuZDN3>K?GG;~c6jRiYm{3)`BL>Kau-j@LaRd96Jw(tH%s;9M7QgB z#B%+15FhLC{`Ft_Qr$NS6JeOGosW4YDeXv5y9&>w70fe<fT)X1J8I$~>1aYa2P7hCuq{vc?@FZ)F0%Rc>C@>M*V507keEiM z!l<1hs8;u7g8L>?Tlj-ZK=c9hNh)V;wfqh%9-;{cGN!iTvtq^;`4B~*?V!bP0>Etl zMkX9$v^&i`AF(wx~sYE6h*5b68l;xgFw3^HUI>_!n#u9W1Ta z+s}XC)R4LWZMk(IfGa~@Au%b*h`EZVX<++mF%GeKuf}(~`kZMmO+9tEUsznaFn4TP z9P`{TxB-4az`fd+(?q{a?4sOeO^IT=Imqg71n2IW^&~iuc}{Si+yQHDi%W1i;@`n! zdm4+ozrRKr!IzXF~_~|KxTQIr2z@6mZbsIcbk@C$Y5(auQzfvv|9hxtme6QoJsS5BF zdIS}(zsG&H%0_6>f#ko=+)46rgJ$*DQbE8wo+Y8;z^Hvst*pp;kVR(W(6=O7jX%bG zKkvz3ElH^eCvZpyt0_P7Iw%ZXJp7c+C>kuPbngJi6P3S*@gXr2PEFuG=1g+|zc6w? zh8|0N5BHd9#NdT;Z+s3reCLVzrKEw)*8P~ln1779wD_mp+(2`5_M+2PvF8&Txb3ff zRV%$vaXehLiXJHW(i%$G?jR;37IwQk=8&B&B`{Zx*qeYkXxI({uAKzZCeJnDr zenYqw@624o{_ta5rx|0mH8D4EOGf;@sl}aRuk)bP-(H2v;fka_VRRrcrB zcegFg{E^n&1rDiPyzv7Td|X5|{IR)Zzg!iaJmuO>VZ?3e=-@bo)Uz3w*R6K-r>jZP z^_XdZnS6!D_uO5FA8WRy@_Si248y|OuS%AFpU^BuULX9tMg^?@JW{+U3JJLKaqVo= zDOL9mf|{mX6^iP2)Oel}#au(cN8j!4xCRLl&q6l%pMpwLXT~I_xKca>9&`nE_y@7W znS(txy?zJeONR=pmxVh|9vc=b$#4$Q@^5VE#2ndvHTFx|P*SujX7A}uLz*s-UFQXU z$mVLT``dAd#$~(>Cy^zc=^Y2VhJ6hM3H>Rvmxa(5R~xj&?wK)%(p)l967~nQe4*RN zbS8ePEq;24+g5BYLq?)@e?~$jZY}V|W7n~P4HNVX>s7`@rv zf%f=-#A_9~Jfn8Kx8t8WfA5m>2cBgA}-M$6UNU2oH>*>6BfH#YGu-Oni|mMHqS zNP>y$OZ(u6A#ITh-f1K*b zNtw9PEx&MzOd@wh;#PdDyv+P4O4%oJ7QA0Fi9e`tO;OWIh3}zR;iEw>&n+X-SLX8D znD$ovO$qwi4p~u0F!dKC&d_5U@ZLZu<=r>lC6A+pD>|ZEd_JCTeDDOKb@qV_v_8Eg z_PKvF>@B<>B@AbnknM%iHg00sSC_c1DeUBV%(FRmLD(%b?xVSUVL|)xlmSKrd1D!& zWM@;Sy1cNwk9kH&i@N0}V^Tv$#LTOJx=h-acXif0_NOG6HJPtKhh?-g(0r}q3!||Q z2aN%W@5u0wCSv8cyTKM2-+YJ7UE4{S)-FH08hvVfIzfgdscd=~Yll~~9XM{f9pOag zBfQv!PFKkrF{2a_^(86su>oaxKhlcm@>U|#MWyohMo--E{DCr=8^Xw)q&SXzo@vMD ziZ)+GN+BhGKiDKD9Suy*YxhgJd^z1V-=_jTqWG_U1bqbuHD@9vu#WS?Hsb^(XuOz` zWsI^Qf9p_d0Qq*Bt-*`(x?BrStWa}CmcE%KIS8AQSskNCi0VnQ0h~@W-O_K#Ta56z zT@{y^;iuv|MRte#%wK-$%DpBBP&IHP9enRLZ9E z;xq%J()qg0^rNHooqGd}UuQ=fVF&3WBhOkOU^R&Jh62$dBXN5JGLXvu`(P|z3pk>5 zE-X-t>89_w=FkDDW#B+@*Zcu+XN=;tJW9EF4m^4>zCp7BS%#`4?X+t38u^n;?<(tJ zI|DVPP?ax{o^%TXr|@nl(J+??9LRFz)>(eXgEQ!jtK;<&9fH*(JUYP+b>{_7;nPhQ zr%p?r?0Cd(K=_bgBJb`x&3)g<{k`oLE9b4g$1w`tm_H;AVYGSpRykN~p~y(&4=Q*T zO?z1);Vyb9t(a?oOh>%xFUC#<>|jSjPa`pZm*>%nuS7jYoEBv?T=_9F3&88!w$(vu z^+0jI2ibiuFqcm=jJ~sbkb#5s9!c)0hSHIn@AF?}d300)xs9 zRG~*3(mIaYCT89@1C_yg#7jI3g5VT1r<(@LcPaCjYQw`RE*`xL9Amkpuim$vWtAC= zVf{q*Q>mpXO678ZWqe&9TEVcmN^-o7*-Z zf+;ZeYAo5hau2+T^$?uWKZi;;-EQK2nqbze5ZTp1%r-Yj-&ZO8?R86lZn@Y#SKy6Q zQuZ8>ZhHM~*zLp?+WKTj;FZ>!OV-WM4&sb)&z|Th`A}Fmw^#uBcrc%m3k030i`QNS4&tB zjvr?^DWc?+lY7<&Dy>bf=e-m@j(#CP_VbJP*>}Qa+}+lLPi9jb>~T5p;c&jn)tBy- z23mY)J+kQ;jyAEkxcL;G)Aixn*K3QoqF)s$1lVKDM(&>|uj)t?#oUJpyv3A}uV!{I zH-Kw_Zhn5In@Ym5nyflcyB3jFDN4`2x%hSc!} zZ-v0|%-co{7%$~_TUCGMW=3pp(b&I!Pol7{S$BE<5w%U)1&4E&&a{(Omexq7zj2ub zOS{i-f(2{y`ot?8S}I+6fl9BQ_6$`bH5kYyxGfqdXBh)dbnnyMJek-_PU3^jiKJ>l zy{jS?`6r|L5r%E*H(J*lU?}i4+q&O+3nf-wEfvUp^KEMYuQF0#CyIY0fj`kOr{>o=UYd3v<1{!#+!AG?q}aat$@--R;xB#m zI>`*pZbF()rIjYOE)UzhpBX#-Gi=&DTPFJf8?tX}hW~Z{@ai@suet274K9AM^WIWm ztFLKT2QNed_#LlnVo%0I)Vx)^RL9AH26nDxUK7Y8tV*E2_U@HvVPK z^5o)TgT;d~;@waN8BzyIsoz6kUkZz5RNw} z-u+H(%NSKcyLrI3ap7kxgxmV%_f`_(~a$J@awAn2V}S&tf~la<5yY}P8U-BiGY z^@UebtYU{-5D`-E^O)??6QvUU+F>T^!SyNR!jNfAoGFN}xllq|<(vx5C z!U712Y**N=S7{<$$oDyZ@h^c7oTjEk#;|~wQn+@$oHSU5-%b?y1ws?3Iz5vt+KV7SMF(QPbRE)wjwBsAJXutkvHjR_eB#N zw0qU@*khri&s#^z#!F@=D)KLrd9SlFXs6^(4~L5hSoha1qEqC7l{4}qK-s7cXZF4g zC;I^N!4$pmTqVB-n0BW2I=(J77&}%NFXHtU-!Pwyhc@LQ9vo;>0c<{s<-g|3W#C&q zs;;BCBt7A;3+HasP`q5v=fBJuTtz0pld~D^P#-8QDm(!v8Z%n225cE~8&L^Jsi28g 
z&KI$C&L|&L`ytzSzvCEPG{hUB-4Sx}ri6ki5gRgAFd`LVXaNj`k&bYABUtDUHXWX+ zS}sScOM3NDtN@wr)h3CkZu%PXh7Qt?xnQww&UEFALKHKod^tbY!$X%~om3oG1&nu8 z&oVa?d3m0v!~hs>QWbD6mRK;6*jZ?%|KZoSW7AR#-j!#YA9XR$i8qSB*|BS`Si2ET zP0ELh!jZtZ!y+s6{G+KJD?8x(61Daw8M93JL~w-1>o0W`ZiG0$6t}^yFfyFb7{8YI zU~WZb7uF6AFiA>Q%#PHPn{WC~JjlPX=Azo~W~$&RE6>0se}LJ;wS>cPB*!>dVfCti zkeP_+c~VqoT6y+xS-slpy(9RC;byyOc%O4Z7v>PYca~{e4ytdam5H3mx}%S&02b#L zF;0dQ5@zI{nYc*Krf+niq6<-BvLzAUhyZ*JRJu*Z1s^wc9 z06IN)F1|h+p)#X%u2O%`e5G|UZf)RzQNJA9aEm~GVcwa^bXgfxGgzSo6$Pv+gYrrV zKh6U_p=c|Lk@6KQy%=}t5jiv7L2p+zA?VG{J#l?&Fg1lw}gc9q7$?g97h61B%~U%E+U zVpC0&>@^WXW8PaLSJ0KkQwJM>-I{GHY>FgPM`EbaX#To z%mZ(CjS|!<=#EU)32qW;9gY;W1^$%W(UkAxdqn2`35K${R0PJJq(eZ#ujieK8WV^W z=T|!yV_N%Ve($(>37az=M;(}td>>LTzssL%eq60eJkrS>I8%3%2{Y1vpkci{q73T3 zInndA4|&j|&7ou+JWf*~E(xsAcSmb<_*m^JDXts9=R;mQoaMOQO9fb?6$q1hSNnM( z8(?$KR9P601KOM%S!F6LXzTtcCCa#-lz2;+J#8Rgi+Bsm=SBrI74~Km-TBH8_MLng_>Jf9H?Q!i%zXUXY~DKB@b>B(;Zt~z3+2}R9Hs5i(h@Rimz{|U z(1lO*)Hr5aKJ%2ROj5q0Pn2&EK1?g-md$*0f4<@58Ii;4G96AW*HPjpa)c54thJwK@>V~ANNKAG>J;m(!4}>j(3h;^60xCO>mhrH7SmtDBA018AF4y<# z-uS&hw^JmQ>*^5=fxd^-LhCf+5xFfntM}MY@hd8~N%p%v?QJbofL&8`!|%uGlueXz za2`V5RrBe0?@x-dnyB3!W|rp0hRJbJ9HBg*>%H$lvbX(Ofbfco$6t z^a?Ial;#I!>(6xNe=kDVMG*`-hYP!NSroTQLXfgK4%{``>ITY%gOcfy6AY&ldwV4u z9p0gICGdnQ_BeYj0Rsp1%)m$tC%dy+IM94eI*LsMa1_6MC8@MBI>cpPC3%7Ea#!*( zZYL^KNpe4QuqYFGAj^F9=}K{RXuI2I+f%s8Lg(B)2FPf`u`3T_c!1=FnJJ&p^sUpg zk^-xJ(=BgEGeVRbmRlWBy_dXS)M(i8Y%W>?eQnOP{Ue{?sbk||NKo5XDH0BKo&T^( zCXksW*nk*T$i)F1$eSeP=YGbdq;=Nln6?Y_xCw7)r#miNd~@A~WD=U0T1$jvt#lTG z3}96xQHoY;%>MM6hb*1U5*5HIDfPpyk^f1eoFk zV*X-T%vKq$sWTkzhx#4a@l~GB~k(BKE*5z+tQig+)G%P zb>8%O^gmCPzxspOTkVa=bgk3ea@pUH0dnT!C!8g|-Z{P5QZSK;3KnPxP9W~l&kSC#1)@1L2O)d8@|Ptk)z75!$#Ojv zaNWV+COUDnp}IYxTs;=9Cw8wb0RRG`G)5b%AyP-g}f3-=KcM@@QcfBQ;zJ zI?<9tft1Owm4sO(&x*#cUFY>!t#;aXv(*eoX50Tr^1UIi#~+5`FM7NIE9{&^@j;8I zfMQ!m6@~bnd|B&nxOlbTf_zEOQ67)qUXom$Lzl}}h{Ztq4Jpf+t>=3E)~Y#>^}2-b zv?{6?2ND>sqv0^96?-#!qS+L$3tWte)gWfs`JvkkrRW8jeW`#Tr|?o*)59gvIbQf0mHy8UGRCn{u3puZoatt z=%skh+4pah6l^S^hn}Xb3sk_&cLO_#QoG+RH8?pt!C!kan|0f5{RcHC;5AFL>Z$kv!(_HVORa7U5e_}-BE%+BIj?@L%~n? 
zFOXUJV^jvv+eukogz3LDz}MizexP}VbAN^>61#w9aWMq}9f{jlR>j$DO!qLAWKDw6 ztVl<1GTdm0Bufd zAGp2!j5%;PO8_9DvqM}9zYHqiW*hc|YaUwG$0BSJ-k4NNys;9z9dKFDH&L79pSNz1 z!FsD3^JDTrmLoO=)O-laCRA_wR@J*@?@w|6qathPtn7a5L=fkp9CVV$|c3oL>Cns#Q)iS>UE=%Dk;|`x$J26m#&>o99T<)M#SFw_DuKZj<3=D zihap+)0JrKIGQ*SLAiF9kvo@il*v;k;=*{X?j3W#ZX!O|d^MZoewHOAf++Y*zuS?JAdbe;2V`{d~?tpU|PY0!tTMs(^Y2VH6$=1uegk3+=Z5B}@eb`epk{p6=?n22Yr(Mx88&f(8>r8=G!5e*B+z`r3pA zP3Uj*thD~~Qe!IM%VSo;kcM#e>8>u2$cGPePW}C)jV+oeHv(&GAr{^FBOD<;t)(P@m|H-Ymd${ WZr4)FnmVWHfKjn=|GE}+{C@%2J?EYP literal 0 HcmV?d00001 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/pipeline.JPEG b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/_static/image/pipeline.JPEG new file mode 100644 index 0000000000000000000000000000000000000000..aa7df13b11150ec1428ce37955e3097e66d3eb4a GIT binary patch literal 19054 zcmbTdWmH>F)IORPiWf?8O^Z9li>5$<;-wUqphbea1ZZ)mK!Fx5?xYj~MT5H(cemmW z0Ydoa_r7c0`{jPP_fFQCm8>&4+5607?`J>roQK(mRlqY9MP)_6qelS1Bg_l%umF$; zJbv_F`>%ieU&F@wuYZD#jfIW#1PAAT4qSXZ99(={92`7CJbZ%x8s;4lApy~UAO5?@ z|9meF##TvKEfh>^w0$WV(fhKKjvYK{onTJ zF&6d{99)bUgqQ*FXBhLburNkH!B~tL9e_Cxz$Sh2{H1^_4w;rYE|V*{;FrW)JZ8D- zc8U*UCoDo1Zo&8jlvLC-w5)9G9GqOjBBEli#UyndHDr}Ma3nhWxs1`;dS+Y8X7w~ySjUN z`}zmQCnl$+XJ+T-*VZ>Sx3+iw?e3kPonKsDT_bO9|MTk+0PBCuU z;Qr^=qsQKu1B(><$x8v8=dxP3=B{K+f?x2+#(A9$_XAixdC?+@oM4{7CWN$M1jNM>aFK77gTCAz_~^ zu{|FEQJ)HX&tZuOha8~K%(^p*f_weAQvh|HvghO9eP~UY!y2E$I>}{W zTc8(u^Tbg2Z1xACGp;xiu|CrVoFBYuz0VX0qCrZod(~8QuHH4Ze7IJg1RDJr(lvCr z)_%wLd|s7I>`G}_@*8`v{{b|-&^G#j+q9J^;CXaeEWj&_v{FkvLLh%6*?Bi3uCe|AR3I)UI zv)R%~$Wg+llr3vdEjz{Z8ac!FHGZ`=fNKr+C5*i_fj_J)ZHL}d8tA^;=8jvDzW+LX zh9bJzH|)=tUY3{^{$Zp$;S?Q^q`wt4u0}memLzad2){3Z2k;xGbx&st2i2Kw6*WML zc$7DHPB%_qsZx@wAPS%SwV5a>Z>la6t^p8nK&(t$>3$J|{KvbuMWBn<#Ol3iac5?6 zk+M4?o&9T~lTg{BU5Wa5-eyS~f3Ip2h+$i!zY#0@4{Ia+qVFYpZ(W=GILqU;BPum# zr)%?W^{t{zLzb{zz*M@UQqM@Q573Uyk_&X>xH;0Ti>Qtq3;6#`^SArtvWY2xf@IdrOJm$ z@JFHy^5&6x?4i6S__lvTMbi?SKl#4o)A<_a7JZ|=iV7jPDH+#t z_DcAbzmA|-+jAfHSVxt1^S51wBPVjJ2kj~6*x8CSWBf!0^C}N>2h8Fh0EVXzfX|2% zjYG{Y%ny*XlZ1ikWW@i~X`X8Kpr8Mqg3{!!EG0bCw{4v2Mx^+$Bn?+s+Q75L6V=Aw z<@x-`Z8wvu*+3lho>Hy8Iq4~7?*`}73Wp~|YOe_2rbV~1E$m5{Ch;8W&MRBh*NnZ)wzlGV05ChLx%^J7 zy%YYYn8K>TzC*n@NcP>bII7~9vu;J*koCkv@75{D`DeS8ywO7F)27d|yE%sc&^=eB zA;Q__?bceEdJHA#?m$V+c<^%>1_PAm2_oVmC_Ip?`V<%`rCPP$nAM#;ySTWk8^ywl zlU^@aqh4wi!AZr!$vFR9GBcZYnq>47t{)qSvnJ>yt z?00_>1+UBCDwnQ4w>h(T0H{Xn&`1Ul$!I4&0H`Qb(@pE7sa=D}CQ9`;pFTF(kN)5L)7L*gK0PO#%IvkDb;0CP5SrfYmI_3MlU6 zpDS0oFV*YHnQx_;`t&13o)=!um=1h4*N4YyRfQ} zb*G9loNT4_+21&<4>tdG;)JMF@Ges-QL{M|f%&pwdwXu#HB&)}PCI>Bso5@&W&G#KPW%$+hEv6wQ!I@s z-k(#{QW}LH*tJlqe*+-x!p@&(=4aXwQ6TbKV9dPJ?`7;shr=WWNv+Al&Bprl-4{m4 z{67;@pX%N2nMl_4FhqW>zy}N09BVkYmY*Vtoo<S{HYXe9d(E*H+gwv74#jHLC@=?og*owr+jj~2JD7~F@jDb+Jd6^-5OUy zn)O@@?Wgb3J)+lf1o}wtsy0~|H?l8>qX%~)^0o`zMRZ%lhWrByPj%_&Uh`F&Fe$?0 zOmO8iSw8)F>fEh|=JHpUJ8F+r`R!{P>Gp2tvvirGcgO7k>?ypKZ-t-p$_sU?>w9t> zE8O&Y>vvs}^0OSiSKoV%zhy&4qgCClqr03xaiioWd4zpwbo9)0BPn|sQ|y6}uE?s6 zrynE>6vGvwKQ4>Av~Z`uPqGW$z1^KX$@gEH%zCa8sPnhg(|&Yj%^xx~TH{pDbP-5Y z?nw6L-%o~T4IdLj-=xoctWViYl-ed3ZNj1(_Uk>cE}L5kTwcNB)5gH78WkX)BFd)Y(n;9D(XW$-;Bc!SxX~9Y}Iv`%Gx0Hpuo^aT5M;O%prWTCk1jd7G zK_NLc(hq=Wk1S37>>bTNag_ejawFRled+j*RgyStH&fXF`LRhAGP^IpY9pT3P6SoaW0GLoBD`l`KZ;xfPGiEe0s2-)fwxrYn z{(Tj-b_kY_>G-9-F3qi!RRxc1mNaLG&!CuJG*QL+8O74AmTxT`haTS^M{kox)U{u2)SrRRTO@{iJ8x3HD#LEuT(Z zETAVO)LZ@pFDO4>by17!4D@7}IpikJaeP55*rXX;><=ww>}l0C*b-?1M=iZxd)Xud zYP`<~vfVz}iES3jMfdrcd^6=~s!v^BwX4w-i}*m3u0@<9d`-ggZmqdqu(smgPs$Pf zn`umh$KJ<4fef4479_ap74G%}K*>0}VG@^%uJg|?Ew#hm2);wYz 
z6tM%J?8#X-O7=_Jd7t4_z+TsI_fe}qL)>wLP6e++D)J^B81evcOu#d;qva0?1lSpV z%E;dj6qphY;G6E)%Bz=Q9y=6{>3fuPnw1y6Jir#>PGvw?oABzT(l`~Uwg}EzbJZJe(%bM6T;*2QmdMRA927CO+t`)ZWXMnX84BIgSH z*BR<~0@FXaT#nn293*@I)a^a9je9n6;LIF<&sz}2TwnjWQJ(&g9Iw?IJM;K4iuk?| zDo_p;`-A*)Ct&1L^9~7K8#Yq^TPtm;rc~GJ-{5?dmww46*z59ZX~j2N|1I%X9$#HH ztxGKQU&a@I!k9%xBZr5a#DX#R*>Uvn(YlwLRcolD8U@x1v~K}29%6-neRnrA9Y*8s zR3cDhHXBo2t!_}zv=MGe7zkN*AoCImM4qpL!%eZqTG^mv;dh-60OF&g;kfuhiY~1^ zh_^@m^G=TwcML#g$MtAiZ$J%!VwR#6Rmzu+)0zn*%BH&f>~xy9wBEoNH;i| zy-$Gc%ghno%RG}(>JXeG@ z%elapLG3DmAgBh?+W!R6jd%nF+T543MX|NOcBl2CO6)ZY<_2v(S=l*lhQE6GBZNiZ zZR{IKS&GdE01nFaRC0BvyAREW6l+Uu%#Tr*8`)Q#8ds`ou4(<9J#aJ+X4Bs!WNqo{ zGZa%YnbZC7h~ByieoAGWu76-=l#Fd*(465S2P>6^$ak!eIq_anm>3FSal10v(Co(2 zZ+_@p|8?{swyv72@PnW_GX~s-(ynyhbFU&|pqy(fv#K3b^GaO+# zLnCg8IX%bt<8fctRCjv+?_nRhR{bpgd_(TLOMNgHlpyyWO3b&~*@J-9ki$47yTyCj z-OoE7#YGckdWr1J4BM}BN;n$$26bi$XA@`Md`oa|nm|cu+&PkIW&qE|m}$V{rK#^1WIAo(1d= zdQzP;>=jbh+8&Dl0Z6j<&j;DVcy$aYWIqTcB6W>17erw%JgtkdXLa-TY%|>SL_{`D zO_rszN1BCmthbXX5fR5@aX1YpA;I zmw0j1o4=A{Zv*BJK~|X3fbrP`!X6ghSecRQ?;M#fTzc;28t{l>XBtdh+oUc^4eJp2 zH2BE9cg-Z?Apf+f-5XTL-^t51e|()5M0)XVN-T*Y{pw}x^-cASo&rvm{{EEbxgrJU zY6HW1>rwFq10iLYeyp_;`JTgz$Zuqs9NSso!fa~2of#Cpt3cg-ER>&*n51jr%`67i zzt~5o+!BIwa!y=Bb3v9c{G>;{U3aphnm0ImxH$vO&ASWS<^KkYGSc6nR2wD@$Lb%`8VW-jm`07{k^^fd+VXs}cx)`2Fd}e2di^A~n zy`w$xB6orft}oTm^+Bb*&e+L3rQtW;oD?`2KIuQ6D{*}dp^Ms=b8i_bUH#X&Hknny z)|w>Cu<(hPZU^O4!bnjk?@GTFO^<4OZrjq@`l~*Ejp?I-)%?M`d;8SFvYwy~vqQ1lonB?Dd@#yGo?5Kk99( z^P47Y`VdE~iL^Ave27pxw^G>TXg5#L8a7uwk?+cRCiSruI3^{$zIV1m%Fp_ZQP6#1 zapvzz4XmSFlN6%0xUc8%@|+-Y<8BEu3-!Evg)Hv)T^xlbMgCNBe@V#mygL{m-X?M> zI8-`e40GS-wE8GOo}$c~+L%KFHM&m<jtEf>LdU~rMu2cRv#BU>{Qct=} z420Iyz+UY1XSPV&dwm-bk8@i0qMPg@{Sn%j?JSYICe&f>u#fy2JmPb+v@=c^lS++I zL2@rqXsDUwx%Tjr{hyRWM?+m)5r?Lor}draXD76Ft@wX8I%O^L*37YZmN#dZA#1~A z`V&(7F)X1)cs^Tzb1-XkDSM>TEs!%~)5B#hPUThlH5mNX7k(HID;}mO`?o3e+f6H8 zK|if)CY!MV5+KJ(MNMZQG&pEt9S{z)dJ|{fH5Y(gheE7M+!24goy3uKwp_M7H?d=>2>tA0>$yQup}{#&z>v;i+bWtmeqp$@=RPp> zifV-2_IkT&aNU{j-*=Ipq~A;9GLj9h8=uzFMm3nQS!emq(JLh$cWceS*2lZVy#1sS zZw6d?WZpA|*A+{6n&egRFT=flz}y+jOH`6Nz45h@sD^gZFCGB1mddAOfr~&K{l)ez zg;UGUAK0jcAUJOv^k`ltzjUdaF{|PrNs$g~fIKrccBUbyUE2IqCg#o!#aJ^5t6Tw5 zN{PZ+8W3JXHAWveeIzhL0{o+(k8ih3(AW zG3ujzWo$0CQynD+)q{S`YBc@G*gkj-8)6l$lh!xOrSOYC*gEo{%ji`O`x{5$rZXaR zD8I1SE4VF^@QcE-i#2P#K3Xw_D5%+b!>%-U(jpb~%ftYG?hQAGgJWBXd-7uccu{t! 
zDEjfF)0kQn4dIbPy_Oo7Z8b*(+qvItNjwQm$gVfp@4uD0I>A{Bu}RN|*YoO4=wc;~9eo)pY61)JAFM?veV4y>A&WPS=Zp4Q2%+ zYy-(20PQ@8PauFg!_sIlsP@S5BW3E%8NJ^(dcB;$1;mLa;>qZmiKaf%WUWaR`_IHy zyiaBRJW&`vHA9o$#NRc;$p?V%hu@9=;KH5joMTnrbWo!M33K(jF^cI!CL`FIFZI6d zxFuuam4r^*aBFL^_UY0<$DFN+B@rT#sa5?Xyv27dz+3yId&o-4&77h!pW6(R+iyqP zXjei|0Vh}U_@b-_cAcp^Urcqyh<0^B+5@iS`GXpUi`hmi)OLX{xSh)l_jIXf82zmo zlqQPzO^r^$J&kX{&m3PiG})19vbm2ao=y}uRPC%6qC?iePic5XbX7x$JFr}er~j;k z0IMGW-J0{oowTC~GV}|qlYW-JNyMX@EAm|z;#;`0v!56Gn7+)~9&3{=Qx-;zzCKmh z>W#iMfGSB}5RDIr1}j^G<@LQ=U(Vsq9{G=rE7f$tP`D4 zFIsr2tQg$5ft_isEC_hgQ*XuK0ZV2M+jyPdJ|Y`4of4kDpGF6`^}2K`c33O2VQ#99 z5bY^ee|;tvVqo#vu(J8uok7(H8p}iz^0ZA$*mT8iBSY&CLGaCfQ+}Bki*8l^8j|m7 zw9p=hW0S_EbDOP3gH+<5cBtoA4X}G!>qx2gof>DV`6N1}uh&zw*mGM+n6i}a`LflW z5-`k5hG}9XrLgwRg~q)^uG4*@KJ^5WIR8dUnTY*>OH|^4H8n7 zLUr@!-I!aKAq{+Qq6i6dVV)hxP&vAi-WLVa&-#qtS#N*e$5dg1azDSsM?@!`ufFw4 z=a{&RRUC3}P=4*KQiXmz4dpHB;gS9Gk(F+c{(QW#wD-x#H`8LG)*6aI-lv4BL3nlf z1{S>2+c`$jTMXn>=Y$p5UOz!*+4bIc!uBr+ z5Hg_EjqM*G3)oX(Vx5GprZ=zr9{@HhGbvfMzZ>fhhB-yciUPU1y-Svs#Ad-(pVWlG zah_M{Z0Ym7!|2=!rER%9ky-+=zAhUdnYrFI_9fFtWBDK~}y*H_+Mh&H0*W@ceq|5y7+L)Ia4B3eV#hD@VS^+H%m(wQiuCuei; zN8mZpGwE^_M;UH>k~FWQ;}<$##4Si30H!qt7vGbF$SpWFSWW~?m+lCll4Gr}(PPW> zCyTna(<}A{{4`%l>x7zeEKTFF2qmWS37y9823@uEqAu%E&wJ7MrS7o%j15H3Jv|}b zqI$lap506~tBYT3e9En3(Z8_;rfzFzdabv)TF=bPh+j!_rG4h9^r&Rw<>SczSoh}Z z!Fw6437~AZ!;(gcXb-M2rO0g+6|T|~r8(`{t#1uk1HaWb7txyzd@?SQGa{!DwYrv* zk~qu$I~8Z`zooB1BlSyae%}qONJeqWp=$MBCoP?d4IEl&vslS`5KuCiqd`@~dCc7> zq?~hjQ0-3i#cOj&Q1$+K#b=T=-bB=X(qA+evZ>>|bixx?XOE=5FhC1a3}+y%=EV*b z%6yUtS>vCY;uU9oGovAh&4E#zu6LE1CAP9yx!@c@Ho5OotTh{M6(pb+oF64Bo!4Pe zXwG5hwU0K>)nA>1ft}fq%NaD@&Z_mvOgp}=#c zJ$N=*aI_--?Y;`bRA&H*g6tsWaef_mwLzrVW@)GF{d^MmxN-seXZu%$_X#q5=%GL^ z^x$b-G;{4&GUosy@o?9<1T*!(Bjf!j$VQaM9Qotaa%RB?01Z2E6f7<@aw$9DeGC;Z>{coto0?izu9m+!L;ilRv}Oz~_k#eJ8i5$Bn@x_=Un@=f&} z)K@A!IG=3Z8uk)}i!*&iwKvYRpqGIs8bI>E!9bw^W#I;XsJQn_2(N|=G}}i>24Q%z zFFdyV(#HQTnbtKvg5qXc2p^1Vl$yH~7HWKCt(j{trmITa@H+jY`({6osQk!Icvw2F zODmvCIs281wRpFqRBGxKUR_m^)Xy7@2r2I+eYR6d%9?R!Heb2>Gh*%^WofnXyIii* zYgOfTS2=dRfm6aq?hd{m84-qqb*<6o>R|>f;R@eV%-cM#OHsogdgfmj*Dsr#BD`F) zk%xwfb_8|tB6?B0>)Qg`fKffXiP_OqZD^(4wre)uufII3=(!{-WxfUuZ7E>dt)BVa zbL6N}#?>~G+m7KEh5@B)mZ*N6K>t)@fg+|vwO}!8L!NHzb7*3w*>(%n75B+{>L>#J_wy0fzR7oAIT4F=3|Z^zd4j&502LuM%;nL2#UI!g>AebJ zQzLI+BM$&1CPV~TzzDcUEI}u=j-n3$_ygei(%X#-*x3I9x76p#Ad4Kl8%P#{{NTJ- z6Pdcld^?ktS#UPc8f&k@hsmUu+b!liIaZ;;akaRPvlOQLMpI67o)_Fqin;RlcAj8z zk=o(|QMNZm&n4+M_O+xB&f8c7jaC^gw87uiOD@=dD5v}BOx4ino_f8+BFLZps5fY1 zj;*)Qro}h)$YAdc{g>YF4tc~oHBX8+-t}E zxU*h;`{wA@p_y;|ub$HJWzxFR(1(yPimMzHsnOa}7x=kn0>dI#{j8%!40B#>+?QJ{ zFi~fbXk!m~yJn_NCh5;r z>pjwLl3!28@BDi!=4QufS=!T`n@Hss|J}SDwQuU*m!d1KpCkML@Q>c+IbKhdOu$`Q zw5encEtyWnW7?!=qA;V$=YEO|0y(%)@u$SF|sC z=0`M_sNqa_arAhQfGN(0J|%kBmFU40GDW?ir6j=uzZvG!w6*Q(bxmTGW$aUio|y91 zC-4LQYu5$bV3G-uw5H)ttb7`FB85LrVJHkM>L0VtA0o^j`D*qiRQ?Jw925RT@KHN# za0`jQ9^*_Qomqk~i+9Kf6W^GixSd(oB!{YbPNMMwC%ko|ij9rld8i@}@dP#FWdfGa zDW{byolAy0-P}Lf(t98oIc&=u@8f12ysu!)(42&JTro($>?jN? 
zy>p)pYh{J1!IQO4ZcM0@V%3Fz$1}=a>)p=m>3un5Hc?sYGGlFw%+sL;Jc>R&{t`%n zbiLz%GSxU!?+Mr?@|LNWm>35?O6jGGq_-??@dA^8Y>&eXD}Q8k-ge#ACab6vV^HL)NImTScKX(@?A`!~B4{VN|fv;#W?twyWVF5Q5Z0-M>vnWKy_NWy;IY zw56%%{*B_x@>^!AJhV~d_BSa3#kSK$V(wEhG(pJftQ6MW2T1B}VX|GxH$+&9i@*ya zS7Z{1;Kr1zv!{rWf(fpqz}>d(jtoQEIhm2h-=I9F+5N~ zI;bY(=?69~EIltW8spdZc8kS*K`!0o0XST zrI21Dq;JyXx9_seU}AgO^O3*9teZ1}%&Yisy--*;t3lJ zyWAl`?4d%xoBy3&3A!uB-S^HJN>|sMeC-0&{A=>8w>HTW&+pviwe_$mWBK`8t|;&l zsMr?0x&h~9+OZuqc2@;g(&vJ6Kqszkq3E7Ka>x%aCa7lZCw2r%6$8d-5mJcN%}`&2 z#R|SbnX?_5%YX;+0B|{!Ypys(D|iM_Pi%REo_jKgssCw69a$AONcdhPI{)}?vffWO ztVniS-k$EISA$bC0o>Qd%egS(V4&^id+lFsB}zG>QJcndA&l>cak+#2X11=Q{o_Bl z{Zp`K&e1afcyJH%!}(Y z6KVdU;@h+&RLbJu-gxZ89uD*Op_0Rbg97?(YNEKZ|3xAtX#o+$>|OWCLh^Q;&b;u` zY&lN)na!&xSbv$3KpG3 zbrAkp#6Rq%(`S6ccppStbAm7#Z+^KEV7bO0y>PJRwAnOns9o>j+^?1Whh^%R(uW4h z4U4nXNJ$^rv^f;r{ga=#NW6S`%dnvMWSFk|)o2M`Xms1*h zW8r8p)cEfb=UG)pts6GCXsG5{}OgJ~xm8$-kbR zwOp59xr!9w+AI*W1n<5nk5lP&5Yn#zU^`0bXkGDpj zwk*R-{yhLrU(V99J*meeIYvZ^PjuY)H1}pDw1x=3X&HVLZ^wLuAn9mEWtfmd09!D62+UK3$A;%>Bvdurb)}F_QWpTyNHtz@N*0$ z;D!=|IG;4N`z`X;A6zHD(XV?Y%8B;e*!kE}^r(D6b|OLe8YJNPlxx|iySt*Fjwj5k zdhdytW`bJ%*Tt6Wfr6us6YkfBdt^(#uF2=UUqjE6^$w-@DRYXHpKKMJEe(LYA`3oC zs|Pxia+Diil*(xt68*`Z_F+w3fL5n6T0YwSd(WBnVc!1g=mFM!VX+O7@y2J7$mfe`|=g_NPD= z{!cYVuUe42C8h;QyuICy*f7hHLVNf4saD2}tFcRPIxp+MQo`gtQ@{{4@V znY1jUJHE=hS0LhrkEc%uNq%bvuab>A=*i}*a_#;$s_iH16JiNHTT$Uby~s94r0)#+ zKC(m{C1_5N59&J|y-Z(#@^mOXnkf7b8rI#6dbJK5%J9OdzZt&hfbd{LE=EX#^TAD#!lJAR@Wgcw1!1sA| zg>=G))ic=RzZaX+k7DQZ6biSrT^Gw*#J3SjL93x?K~?9-R+f`>D|uIoiXUh+Ooe<5WKJ(dE`eOJ1P=h$x{szw=A{iP zm?isDdL&?S+izF4(4V)|@hRglLgt=@gpNVD*Bgg!%r7Wjy(LXBM&b8@vT}Lea26)ZJD<= zO&1i5YIGwy|7}m@r5{G5k@4Yy<+v07Le&E{K(sNAg6*`gHKr189Vw>m{etZoU(70+ zRY!6x{B2>1LaCO|BP~1K_Uo(pQ?PjqSwTYd9xz6vGbXP}Aj39xp-;SR0t!vr)|lh> zn5z4fu9f$=r%>_?*`ebwxzS)u&;r%Ci*Bd z`(?xc`(0H<@dH2>R(XKCAf>{p zua)S7oee)vM_;bwN7zhBy$-^}HvdWd6CUB!R9#QkZ;pTtuA*Q8N zw9zohiJIk$*vPvHAl}lckpC zLWWk>c?#?O#A18)TfLMlr3{|3=I`U%ULabtiI+{ySt9Bt>P>B?c-4K(v4P(fDph4C zDtT+OXxlqeaqN_jr_1p89h_g(P5$lk!q*!By1r?DddUom)0z}{SI}hZkoeY;hvqyp z9W^*7!aK~2r(oZIsqPSSbDTXa>)9hPfd#zx6K>kilA{s6`nH;oNbY@Ima1D(N5${& zQq|>>pM<|_C_$@fCSdpMoc`&-VDfm2F(a{lgCZd$V`3}TBwkQ6M4}Tp5JZlQ=n}+s zk!q5Zk$4n0H@XWp3Tmfh0#orL!)=pYi+Vr8F_q~DP?L=Ek@%*@Np8j`*+ z>Rz8m4$#WK(O2NvoU6eT#DUO8IU+KU;{>vIHAA_qK0c>{eNwfmTGGq@xb^yp>1zWH z_F_xg+C{j{q4Sqkidi2&G_SVCl!$J+WI*2hdO)92vDEZPL#b_Jtu>rZ58hly2W=Cq z1V4ph@=ZtrXf=mXXmWLoY1(t2aOdmK`lj*OXRd$g&u{c<qwy_>!%NWMGVgK+6b`^4Od`P1oi=yuxaiXzk>$n`4%Vc` zA(`|SRiTFseTNpU)foo?jx%-7IA!Lr$p)*=c6hmabb##0^{kS5>Ucs7MjO7_?QyH$ukRLcIh)s2UZ z>YBeeR!i?UfN2QD#slC)D~@pieE(Gn;v96Aw`+Zd>(cfBc*VU~%dPOMj&uneti%h) z8=aVL9=)2I4rJV2sp*aOIHvMR8o_QE-;C!dHY;3kdV?ixXGxn%>Ae@GLi}M8MfSUP z;yAY{ep_wrO&y)0CPlZsRt)o1q{rV;dLBF}0W*KSp!IU*siFt9v>99 z=$1!D|NQn<7w}2s1!SpH%$?==Poa?G;T%b5VSd}7 zv3#uK2II0!ugXQvvvYcp%!ayZ#TA2WUyRFC&wQ8l<8Vi5uyQwk!gTM$rBo|0?7e0O zFY$z$i#0Oqr?M;j3sGFj{P~2)w&6our#y!_M&0%Cu|Rv%i9^tI})EB zZp1>u>f*dtj4S=ldU1qhQ{wkSg87tRt6q#LS=6wK?u$m$m_UX`?h5Et zxK^ys385Obgl9XrcarZx;~0U&*_J0b@jWu3YcPmZy>vz6H)WKQjLCtE%0-X;5_Dx)6SJWX?no*}Dwy_}4M z++CzjrD_*v(Ml&z>0O=x^Gb+Fp zG%Xw4*)~+9U$DF8^X&eu-T|y*U%zO~EFR(p2z}-r>pG9%2qd;6{E42LS6KP>1pe2K zEHO4r90P;HBtAEdA6mt7J|##ozNCf`ia1U;+}hxES75KVPBfmqXqt%#Z#I95`X$7r zd@hvv<#N>T8cd9G9~XK0LgBum37Xq;dP94zNr5yRG&N_OQp@GGo4CG%36+tkMqtwH#o#z~W`G`}#maC^IGk zhQhjBc|s)36c>jAUc<7`m}J>*c(G9K9kZ4boOYC=+SD@$biq|YGAo}RPFD-iMm&C%2-M;?*V;rt7=V$Ol;SFwBJ>NU7m80~0qMBtIBS!DsLP|dYfP|WG)<(95G)`Q|T{p)H?_+NU8 zH3+!-ioId;vO@Y5Nu$hs7O4st4PTgGkM^hG(!B}L_X<45jxva0T>@WeB;VDqzE9Fd 
zi#kRD7C2GtAFPR9>5PD~+(m|4Ml-Y7V%ZL>^DE{R){Q8fh1DU<(1BiT#@VY1)dZaV z>DzBn#f#tANWODFW}%ZSy?ta@ba+&mHceWfC*=#w8AEfn!9DM-rAG=+|?+MQ+XV~3ArTu&VAmACRX(Z&(?RS z(r*z$wQDztcF?Tp2^6eG^Z}_k$YeZJ@4{_)pZtLdL)xuvmK0t zm^MuC3HLueh%Ey}DZX=>>Qq1dbk}jFRA6dwb8^$^v3fQp(7R)#cRQRl6wZ-(=n(bn zT4aY&Ea#5t6TGGNoEdr}gmV{MTZny|!tXhIQORyJ!3(v?N7jui%-OiTYLCF{rJvaq zJGfgf@suW*T2FiN_D?&kOOqUBehLh!vlX?w`>~~`th~vUiIKnsW}`cRv@cFILoPCM z>K;2W#R`T@#OnRXSS!|`Ke!7d9kOe+=RLd+j95(7PcdCvc)?C1Gj;O=gXo-2Hs@`6 z%j1gZ5|Y%8WVBCT@F3i=aWP3piFf>27a~x8fXEjH=-)J3U1&4+1SCYwVRBjHjH23(A-?p9Nk^a|%}~X=#!gw5C(4 z+P%(99+>t>=N-O ztSfce`!=rDfdywvZYTP-amlJ~>d4dodzbA%MFHc~DLaa6|Kr3FJWIWGijMjyjag>u z3SD11yNvYji+L>TBo0)$v7g-1kct8pZ^FrPaiB|gpL|khbWd9Lm0a*i>XYMBPP0Y| z#!Yz(hbO*guD)=JTZeo{cmr$(JV_`JaWr4_Ni*Y z-m^cV;Zq;O7Tu?2?hs+7(bVd>FH?f+bVTpFFRG}tO%m`wcl z;rQp~TdI*$Xf$7x>vGf!5PEV~R11G<%3TgsF7BaVS|AUr&haaP`P|AzJ4(OhiZ*?J zbCclblI;8;U$*`P%@KA8s^xiRS3k@p02Qhdb#H?i+=oKhON;{NatL5nla;@6$c0G~ zXTyV#-{-G-4bLj2jOGvFsn?`6bxY-odd28Y%9H(-da;fO)l!7(sn{C}nm?dZ0eGSM zxdrncLP&@9eaJ-az2sZ(w-rh{QYLKz+o7XtGq-&5mg~_k7fcuoC2L#=UbVMXdF$SHm!#AGdJ%!WY0+3(9@WG)N{UP_ zSg^kxlJEnszm}!@Ml#Tf5eEf3qLh>0HO6$0~QkaJj@#=fBgsxY6UAgj6v&Tl3yAnuEp?ubEDN$>D{@TE66mym*i0JLs9T8gR zJOV{g*6sld{;+Q!8`$qkmedmd4h_d>HEJBjz(}sS<<(CLx-SB_lQ9;=K}9s_FdO1L z_0gif7QgM7=-iusF5Po)os>>=6$EBvC~6XNb1wlC#7qU^&${*{GIL+(&Jo$WsB7nF z{W*i~`wF2{@DEKP1$%y3M`h;I7C!lL{g35C_s1LcQEw`J>1{WN1;WHBE_V&-?=}1F z)!A7Z8RF8qn<~ZBL={CWYmc$rxW8p|&3Jn!l(iOFzLgIDU|X=6Xe}>YiPwp+2t%?KN2Q^kGgynLh7&doX|wBs;sQ`o z3jS`;N?~U$To9k405}`;8wYNlXKa^#d8TAvuf^5C`8j|X*YI{m{=T%CVRcCxO**)5 zqPS;H%Ew!gdH6=0Js!8k2X21&b4B~eP0Ep4D>}J)4OsWK?slh(ast*BLmm4XV9C~O z`T$V=GY-AiR0Xpr1Bc2dH~uixm{#BE`lR=E=R8j{I=P%i7S*hTiqqF4#bJ+pR`jMlUpsDV(e~_kA;VV@ zhR5sYjzzYW@Hfm2rqowepC*(iE~H&Av9mS}v z?kOs;n>1Y)|2IF~Q0SjmxTw{`r>m>=`R7UJ3?uvp#9K6D+rT&DYgQ#$9f0ZgP#S*Bel@vKW5Z7JZy^aEFFmJZ^o2i9%7rH*n|K z2SA6PgBu=dC2IO}lLzpW4*_GAPUF)uxS<_GbAhP!It)KfldkLvRphiXnvF=fK)f&# z($0Cd?TKm&X=5-%hXJo%I^h{CsP-7^z&w!v0 zpp|`jlTdfPF}QfOyUzJOM9VN!Gt#pO{0tfGZ)eSccCwIDrru})hjzc#{}@bF#Bo{~ z7oPeyx}-|~)0=#$&q0-Xf7z9b2Ixx|T&G))g10+MJ*BpfMHi-iFp5o}Cv2_a#5G)v zWpZ6&3i~=^vHs|lKz>n0O?j>6uBM-nkeeu5vDKOZEFgLDU6Wf|cG&{!gWu4LP4N_t zVGV8NfjCn$Z%?Je;da!r5O=yH|Jdk|-c+&l{}ZMNS@%A(D%?h6GN@%zIU@qHt#u|> zHmE?(IXracyZzTeoYbc2)`MLve?gzI2aRmJWAL9#)inEv*&fj_%GvqA>0Z0>Z%4K9 zN5d^k!?#l)H#U-P1-Na?f8yzz7e&rF@%*CMC&lbM_nrKD~vDmG0tyHs2dGON|TShlsRGJBbW7TD8P+1!ImR zI0w^;<5N)9qP$g#`P}qSgqr&6_R{^Gv}k{8j}+ec8(D^33ikQsK?lsC&N4Ig&1HC7 z;7^BqHS=2deQq4%_wF-;>IHl9&2ZR!J*Bz%g-p7##nge|dMAWFB6z~hgces!Q2y`D z7z{mY(7qpj&KjJWq#BQou9R9KQ#28L&PQ>I_Wc9E49@Y4hJyi>5GciVc0LEwS~%rs zw-$Y_PZ$;RSpFz+r)Wn*vW7;b8`Nce55_+Wd@((qouud&pV`*zp%f@mP=9#;01x4s zm&ZOfu<>5MKBwUe&#|_w#wE6nLnA{9j&kP(xCHbSV^;Vr_Qq+To=uxE3}9m?`qn~x z2i9c7meaa7JBly`M?;TZ2lJyX&LcS|V?0I&*;*;+Y<|dJ1ta*);=dBwc#~U}Z5vB~ z2ndV@RxR@P{VT2bxBEUX!|x2Xw*-xSYXqc;qCifm>3(gj90qbc-u$t7wpApbKZ6o``s6O@F=pHE2 z^^F|bTv@zVLQnN&zVRJ+rBW&?;-iU#pEQk`HMymIw#|VF1So&J4SG-PZ)2!k{@p(X z=eLbsANX2Y13Js{?f(FYm9IQzyo&q8ULS_`Jt_-h9Mc`l&Tv!P12yaa0I-&`We4rG z@PJPvqs{R{!|06Mh9^?BAFUX8%iBez)qY_wWc5!|`iF?-B|CxnMc6pPg`?9$ zZ%>LAY~UWL{hfcB@-x-Vl-6EMYjg2KQSqg|tEj9Nw%=u830X&F{{U!xdj2(m9IZUk z&3gAY5d|Z8C)WViM!p%+EG=H)Em}`MBg`kKCyL(Em3XFj8XvJ6hGO!mLg(9$@T*=JR#`l@P!FYJc%N8;>&4oShbM`n+L6y_ zTw#F8HFLu{gfO}Wl$ipkU`>93gOZjGGF?z9Cp)84UP7t&v6ML#<=!H&)9uMADvo@oi>qm^)Wwbe+a{OrCRu( zPJk;}4=zlPzP*Y3FQr_FhGi62>IK!kS5ucQ1!ZFAy30A4|;N9LBY%tU_+4)yb9 zCW7$EB#oHK!ztq{ODL+{DwLkBXmBUViHZC9{{RH<@Xv#O3jWKQE}^Jgp(9LLCv|Rw zrg^WKzihwTW5j+q@tx;@J``$C6_c=r=ISX9U)KazXa4{MvG~v6e~TZp#gD_^6Wkk{ 
z$B5d`43ZyAark1m{{W4Dve$<`418$1(|jvnlR+lSsN#0rus%UdpYzRp&N8fMMpACf z{d!fU9G;q^y|w6ho`>*yZF@@#b8zNMh6M**t=yLCYie(WgTi)g4dSehdHGFy^`FBZ z3q#;L4-n}3BgF){S>nmePmE%_pN2p1Pu*wYCx_N1vDq{epb z&Ii=j&%^7OoI-_1qw0AMSb9{Hq1S&?$Lc-s{8}+m%%7}1MttndUV?T-J?&c z>iU^1_VX_VARvwpZ0EgkejfOe{uZ~JNwdnBo{>wG!f zP$iP}&TbLM@w<-Zq?_AuvFZQy%uz-vJG7V|?YepJX`LexfRyfyl zMH#`7eJj?M9x|rtLOPxedPn_h=k=Eu0RSBU9cgJ0 P0Du9GJ5tx^$5H>;&>U14 literal 0 HcmV?d00001 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/community/CONTRIBUTING.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/community/CONTRIBUTING.md new file mode 100644 index 0000000000..b5d9b809fc --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/community/CONTRIBUTING.md @@ -0,0 +1,73 @@ +# 参与贡献 OpenMMLab + +欢迎各种形式的贡献,包括但不限于以下内容。 + +- 修复(文本错误,bug) +- 新的功能和组件 + +## 工作流程 + +1. fork 并 pull 最新的 OpenMMLab 仓库 (mmclassification) +2. 签出到一个新分支(不要使用 master 分支提交 PR) +3. 进行修改并提交至 fork 出的自己的远程仓库 +4. 在我们的仓库中创建一个 PR + +注意:如果你计划添加一些新的功能,并引入大量改动,请尽量首先创建一个 issue 来进行讨论。 + +## 代码风格 + +### Python + +我们采用 [PEP8](https://www.python.org/dev/peps/pep-0008/) 作为统一的代码风格。 + +我们使用下列工具来进行代码风格检查与格式化: + +- [flake8](http://flake8.pycqa.org/en/latest/): 一个包含了多个代码风格检查工具的封装。 +- [yapf](https://github.com/google/yapf): 一个 Python 文件的格式化工具。 +- [isort](https://github.com/timothycrosley/isort): 一个对 import 进行排序的 Python 工具。 +- [markdownlint](https://github.com/markdownlint/markdownlint): 一个对 markdown 文件进行格式检查与提示的工具。 +- [docformatter](https://github.com/myint/docformatter): 一个 docstring 格式化工具。 + +yapf 和 isort 的格式设置位于 [setup.cfg](https://github.com/open-mmlab/mmclassification/blob/master/setup.cfg) + +我们使用 [pre-commit hook](https://pre-commit.com/) 来保证每次提交时自动进行代 +码检查和格式化,启用的功能包括 `flake8`, `yapf`, `isort`, `trailing +whitespaces`, `markdown files`, 修复 `end-of-files`, `double-quoted-strings`, +`python-encoding-pragma`, `mixed-line-ending`, 对 `requirments.txt`的排序等。 +pre-commit hook 的配置文件位于 [.pre-commit-config](https://github.com/open-mmlab/mmclassification/blob/master/.pre-commit-config.yaml) + +在你克隆仓库后,你需要按照如下步骤安装并初始化 pre-commit hook。 + +```shell +pip install -U pre-commit +``` + +在仓库文件夹中执行 + +```shell +pre-commit install +``` + +如果你在安装 markdownlint 的时候遇到问题,请尝试按照以下步骤安装 ruby + +```shell +# 安装 rvm +curl -L https://get.rvm.io | bash -s -- --autolibs=read-fail +[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" +rvm autolibs disable + +# 安装 ruby +rvm install 2.7.1 +``` + +或者参照 [该仓库](https://github.com/innerlee/setup) 并按照指引执行 [`zzruby.sh`](https://github.com/innerlee/setup/blob/master/zzruby.sh) + +在此之后,每次提交,代码规范检查和格式化工具都将被强制执行。 + +```{important} +在创建 PR 之前,请确保你的代码完成了代码规范检查,并经过了 yapf 的格式化。 +``` + +### C++ 和 CUDA + +我们遵照 [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/conf.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/conf.py new file mode 100644 index 0000000000..596a7f0dcc --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/conf.py @@ -0,0 +1,284 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. 
For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme +from m2r import MdInclude +from recommonmark.transform import AutoStructify +from sphinx.builders.html import StandaloneHTMLBuilder + +sys.path.insert(0, os.path.abspath('..')) + +# -- Project information ----------------------------------------------------- + +project = 'MMClassification' +copyright = '2020, OpenMMLab' +author = 'MMClassification Authors' +version_file = '../mmcls/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +# The full version, including alpha/beta/rc tags +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'sphinx_markdown_tables', + 'myst_parser', + 'sphinx_copybutton', +] + +autodoc_mock_imports = ['matplotlib', 'mmcls.version', 'mmcv.ops'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. 
+# +html_theme_options = { + # 'logo_url': 'https://mmocr.readthedocs.io/en/latest/', + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmclassification' + }, + { + 'name': + 'Colab 教程', + 'children': [ + { + 'name': + '用命令行工具训练和推理', + 'url': + 'https://colab.research.google.com/github/' + 'open-mmlab/mmclassification/blob/master/docs_zh-CN/' + 'tutorials/MMClassification_tools_cn.ipynb', + }, + { + 'name': + '用 Python API 训练和推理', + 'url': + 'https://colab.research.google.com/github/' + 'open-mmlab/mmclassification/blob/master/docs_zh-CN/' + 'tutorials/MMClassification_python_cn.ipynb', + }, + ] + }, + { + 'name': + '算法库', + 'children': [ + { + 'name': 'MMAction2', + 'url': 'https://github.com/open-mmlab/mmaction2', + }, + { + 'name': 'MMClassification', + 'url': 'https://github.com/open-mmlab/mmclassification', + }, + { + 'name': 'MMDetection', + 'url': 'https://github.com/open-mmlab/mmdetection', + }, + { + 'name': 'MMDetection3D', + 'url': 'https://github.com/open-mmlab/mmdetection3d', + }, + { + 'name': 'MMEditing', + 'url': 'https://github.com/open-mmlab/mmediting', + }, + { + 'name': 'MMGeneration', + 'url': 'https://github.com/open-mmlab/mmgeneration', + }, + { + 'name': 'MMOCR', + 'url': 'https://github.com/open-mmlab/mmocr', + }, + { + 'name': 'MMPose', + 'url': 'https://github.com/open-mmlab/mmpose', + }, + { + 'name': 'MMSegmentation', + 'url': 'https://github.com/open-mmlab/mmsegmentation', + }, + { + 'name': 'MMTracking', + 'url': 'https://github.com/open-mmlab/mmtracking', + }, + ] + }, + { + 'name': + 'OpenMMLab', + 'children': [ + { + 'name': '官网', + 'url': 'https://openmmlab.com/' + }, + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/' + }, + { + 'name': '推特', + 'url': 'https://twitter.com/OpenMMLab' + }, + { + 'name': '知乎', + 'url': 'https://zhihu.com/people/openmmlab' + }, + ] + }, + ] +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +language = 'zh_CN' + +master_doc = 'index' + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'mmclsdoc' + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'mmcls.tex', 'MMClassification Documentation', + 'MMClassification Contributors', 'manual'), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, 'mmcls', 'MMClassification Documentation', [author], + 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. 
List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'mmcls', 'MMClassification Documentation', author, 'mmcls', + 'One line description of project.', 'Miscellaneous'), +] + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# set priority when building html +StandaloneHTMLBuilder.supported_image_types = [ + 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg' +] + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + + +def builder_inited_handler(app): + subprocess.run(['./stat.py']) + + +def setup(app): + app.add_config_value('no_underscore_emphasis', False, 'env') + app.add_config_value('m2r_parse_relative_links', False, 'env') + app.add_config_value('m2r_anonymous_references', False, 'env') + app.add_config_value('m2r_disable_inline_math', False, 'env') + app.add_directive('mdinclude', MdInclude) + app.add_config_value('recommonmark_config', { + 'auto_toc_tree_section': 'Contents', + 'enable_eval_rst': True, + }, True) + app.add_transform(AutoStructify) + app.connect('builder-inited', builder_inited_handler) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/getting_started.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/getting_started.md new file mode 100644 index 0000000000..edc66a157b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/getting_started.md @@ -0,0 +1,228 @@ +# 基础教程 + +本文档提供 MMClassification 相关用法的基本教程。 + +## 准备数据集 + +MMClassification 建议用户将数据集根目录链接到 `$MMCLASSIFICATION/data` 下。 +如果用户的文件夹结构与默认结构不同,则需要在配置文件中进行对应路径的修改。 + +``` +mmclassification +├── mmcls +├── tools +├── configs +├── docs +├── data +│ ├── imagenet +│ │ ├── meta +│ │ ├── train +│ │ ├── val +│ ├── cifar +│ │ ├── cifar-10-batches-py +│ ├── mnist +│ │ ├── train-images-idx3-ubyte +│ │ ├── train-labels-idx1-ubyte +│ │ ├── t10k-images-idx3-ubyte +│ │ ├── t10k-labels-idx1-ubyte + +``` + +对于 ImageNet,其存在多个版本,但最为常用的一个是 [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/),可以通过以下步骤获取该数据集。 + +1. 注册账号并登录 [下载页面](http://www.image-net.org/download-images) +2. 获取 ILSVRC2012 下载链接并下载以下文件 + - ILSVRC2012_img_train.tar (~138GB) + - ILSVRC2012_img_val.tar (~6.3GB) +3. 解压下载的文件 +4. 
使用 [该脚本](https://github.com/BVLC/caffe/blob/master/data/ilsvrc12/get_ilsvrc_aux.sh) 获取元数据 + +对于 MNIST,CIFAR10 和 CIFAR100,程序将会在需要的时候自动下载数据集。 + +对于用户自定义数据集的准备,请参阅 [教程 2:如何增加新数据集](tutorials/new_dataset.md) + +## 使用预训练模型进行推理 + +MMClassification 提供了一些脚本用于进行单张图像的推理、数据集的推理和数据集的测试(如 ImageNet 等) + +### 单张图像的推理 + +```shell +python demo/image_demo.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} + +# Example +python demo/image_demo.py demo/demo.JPEG configs/resnet/resnet50_b32x8_imagenet.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth +``` + +### 数据集的推理与测试 + +- 支持单 GPU +- 支持单节点多 GPU +- 支持多节点 + +用户可使用以下命令进行数据集的推理: + +```shell +# 单 GPU +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}] + +# 多 GPU +./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--metrics ${METRICS}] [--out ${RESULT_FILE}] + +# 基于 slurm 分布式环境的多节点 +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--metrics ${METRICS}] [--out ${RESULT_FILE}] --launcher slurm +``` + +可选参数: + +- `RESULT_FILE`:输出结果的文件名。如果未指定,结果将不会保存到文件中。支持 json, yaml, pickle 格式。 +- `METRICS`:数据集测试指标,如准确率 (accuracy), 精确率 (precision), 召回率 (recall) 等 + +例子: + +假定用户将下载的模型权重文件放置在 `checkpoints/` 目录下。 + +在 ImageNet 验证集上,使用 ResNet-50 进行推理并获得预测标签及其对应的预测得分。 + +```shell +python tools/test.py configs/resnet/resnet50_b16x8_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth \ + --out result.pkl +``` + +## 模型训练 + +MMClassification 使用 `MMDistributedDataParallel` 进行分布式训练,使用 `MMDataParallel` 进行非分布式训练。 + +所有的输出(日志文件和模型权重文件)会被将保存到工作目录下。工作目录通过配置文件中的参数 `work_dir` 指定。 + +默认情况下,MMClassification 在每个周期后会在验证集上评估模型,可以通过在训练配置中修改 `interval` 参数来更改评估间隔 + +```python +evaluation = dict(interval=12) # 每进行 12 轮训练后评估一次模型 +``` + +### 使用单个 GPU 进行训练 + +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +如果用户想在命令中指定工作目录,则需要增加参数 `--work-dir ${YOUR_WORK_DIR}` + +### 使用多个 GPU 进行训练 + +```shell +./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments] +``` + +可选参数为: + +- `--no-validate` (**不建议**): 默认情况下,程序将会在训练期间的每 k (默认为 1) 个周期进行一次验证。要禁用这一功能,使用 `--no-validate` +- `--work-dir ${WORK_DIR}`:覆盖配置文件中指定的工作目录。 +- `--resume-from ${CHECKPOINT_FILE}`:从以前的模型权重文件恢复训练。 + +`resume-from` 和 `load-from` 的不同点: +`resume-from` 加载模型参数和优化器状态,并且保留检查点所在的周期数,常被用于恢复意外被中断的训练。 +`load-from` 只加载模型参数,但周期数从 0 开始计数,常被用于微调模型。 + +### 使用多台机器进行训练 + +如果用户在 [slurm](https://slurm.schedmd.com/) 集群上运行 MMClassification,可使用 `slurm_train.sh` 脚本。(该脚本也支持单台机器上进行训练) + +```shell +[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} +``` + +用户可以在 [slurm_train.sh](https://github.com/open-mmlab/mmclassification/blob/master/tools/slurm_train.sh) 中检查所有的参数和环境变量 + +如果用户的多台机器通过 Ethernet 连接,则可以参考 pytorch [launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility)。如果用户没有高速网络,如 InfiniBand,速度将会非常慢。 + +### 使用单台机器启动多个任务 + +如果用使用单台机器启动多个任务,如在有 8 块 GPU 的单台机器上启动 2 个需要 4 块 GPU 的训练任务,则需要为每个任务指定不同端口,以避免通信冲突。 + +如果用户使用 `dist_train.sh` 脚本启动训练任务,则可以通过以下命令指定端口 + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 +``` + +如果用户在 slurm 集群下启动多个训练任务,则需要修改配置文件(通常是配置文件的倒数第 6 行)中的 `dist_params` 变量,以设置不同的通信端口。 + +在 `config1.py` 中, + +```python +dist_params = dict(backend='nccl', port=29500) +``` + +在 `config2.py` 中, + +```python +dist_params = dict(backend='nccl', port=29501) +``` + 
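+如果两个任务除了通信端口之外完全相同,也可以让 `config2.py` 直接继承 `config1.py`,只覆盖 `dist_params`,避免维护两份几乎一样的配置。下面是一个简化示意(文件名与相对路径仅作举例,配置继承的详细用法见教程 1):
+
+```python
+# config2.py:继承 config1.py,仅修改通信端口(示意)
+_base_ = './config1.py'
+dist_params = dict(backend='nccl', port=29501)
+```
+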
+之后便可启动两个任务,分别对应 `config1.py` 和 `config2.py`。 + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} +CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} +``` + +## 实用工具 + +我们在 `tools/` 目录下提供的一些对训练和测试十分有用的工具 + +### 计算 FLOPs 和参数量(试验性的) + +我们根据 [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) 提供了一个脚本用于计算给定模型的 FLOPs 和参数量 + +```shell +python tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +用户将获得如下结果: + +``` +============================== +Input shape: (3, 224, 224) +Flops: 4.12 GFLOPs +Params: 25.56 M +============================== +``` + +```{warning} +此工具仍处于试验阶段,我们不保证该数字正确无误。您最好将结果用于简单比较,但在技术报告或论文中采用该结果之前,请仔细检查。 +- FLOPs 与输入的尺寸有关,而参数量与输入尺寸无关。默认输入尺寸为 (1, 3, 224, 224) +- 一些运算不会被计入 FLOPs 的统计中,例如 GN 和自定义运算。详细信息请参考 [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) +``` + +### 模型发布 + +在发布模型之前,你也许会需要 +1. 转换模型权重至 CPU 张量 +2. 删除优化器状态 +3. 计算模型权重文件的哈希值,并添加至文件名之后 + +```shell +python tools/convert_models/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} +``` + +例如: + +```shell +python tools/convert_models/publish_model.py work_dirs/resnet50/latest.pth imagenet_resnet50.pth +``` + +最终输出的文件名将会是 `imagenet_resnet50_{date}-{hash id}.pth` + +## 详细教程 + +目前,MMClassification 提供以下几种更详细的教程: + +- [如何微调模型](tutorials/finetune.md) +- [如何增加新数据集](tutorials/new_dataset.md) +- [如何设计数据处理流程](tutorials/data_pipeline.md) +- [如何增加新模块](tutorials/new_modules.md) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/install.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/install.md new file mode 100644 index 0000000000..ea0c4b8dbb --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/install.md @@ -0,0 +1,134 @@ +# 安装 + +## 安装依赖包 + +- Python 3.6+ +- PyTorch 1.5+ +- [MMCV](https://github.com/open-mmlab/mmcv) + +MMClassification 和 MMCV 的适配关系如下,请安装正确版本的 MMCV 以避免安装问题 + +| MMClassification 版本 | MMCV 版本 | +|:---------------------:|:---------------------:| +| master | mmcv>=1.3.16, <=1.5.0 | +| 0.17.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.16.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.15.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.14.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.13.0 | mmcv>=1.3.8, <=1.5.0 | +| 0.12.0 | mmcv>=1.3.1, <=1.5.0 | +| 0.11.1 | mmcv>=1.3.1, <=1.5.0 | +| 0.11.0 | mmcv>=1.3.0 | +| 0.10.0 | mmcv>=1.3.0 | +| 0.9.0 | mmcv>=1.1.4 | +| 0.8.0 | mmcv>=1.1.4 | +| 0.7.0 | mmcv>=1.1.4 | +| 0.6.0 | mmcv>=1.1.4 | + +```{note} +由于 `master` 分支处于频繁开发中,`mmcv` 版本依赖可能不准确。如果您在使用 +`master` 分支时遇到问题,请尝试更新 `mmcv` 到最新版。 +``` + +## 安装 MMClassification 步骤 + +a. 创建 conda 虚拟环境,并激活 + +```shell +conda create -n open-mmlab python=3.8 -y +conda activate open-mmlab +``` + +b. 
按照 [官方指南](https://pytorch.org/) 安装 PyTorch 和 TorchVision,如: + +```shell +conda install pytorch torchvision -c pytorch +``` + +```{note} +请确保 CUDA 编译版本和运行版本相匹配。 +可以参照 [PyTorch 官网](https://pytorch.org/) 对预编译包所支持的 CUDA 版本进行核对。 +``` + +`例 1`:如果用户的 `/usr/local/cuda` 文件夹下已安装 CUDA 10.1 版本,并且想要安装 PyTorch 1.5.1 版本, +则需要安装 CUDA 10.1 下预编译的 PyTorch。 + +```shell +conda install pytorch==1.5.1 torchvision==0.6.1 cudatoolkit=10.1 -c pytorch +``` + +`例 2`:如果用户的 `/usr/local/cuda` 文件夹下已安装 CUDA 11.3 版本,并且想要安装 PyTorch 1.10.0 版本, +则需要安装 CUDA 11.3 下预编译的 PyTorch。 + +```shell +conda install pytorch==1.10.0 torchvision==0.11.1 cudatoolkit=11.3 -c pytorch +``` + +如果 PyTorch 是由源码进行编译安装(而非直接下载预编译好的安装包),则可以使用更多的 CUDA 版本(如 9.0 版本)。 + +c. 安装 MMClassification 库 + +### 稳定版本 + +我们推荐使用 [MIM](https://github.com/open-mmlab/mim) 进行 MMClassification 的安装。 + +```shell +pip install git+https://github.com/open-mmlab/mim.git +mim install mmcls +``` + +MIM 工具可以自动安装 OpenMMLab 旗下的各个项目及其依赖,同时可以协助进行训练、调参和预训练模型下载等。 + +或者,可以直接通过 pip 进行 MMClassification 的安装: + +```shell +pip install mmcls +``` + +### 开发版本 + +首先,克隆最新的 MMClassification 仓库: + +```shell +git clone https://github.com/open-mmlab/mmclassification.git +cd mmclassification +``` + +之后,安装依赖包和 MMClassification: + +```shell +pip install -e . # 或者 "python setup.py develop" +``` + +```{note} +按照以上步骤,MMClassification 是以 `dev` 模式安装的,任何本地的代码修改都可以直接生效,无需重新安装(除非提交了一些 commit,并且希望提升版本号) +``` + +### 利用 Docker 镜像安装 MMClassification + +MMClassification 提供 [Dockerfile](https://github.com/open-mmlab/mmclassification/blob/master/docker/Dockerfile) ,可以通过以下命令创建 docker 镜像。 + +```shell +# 创建基于 PyTorch 1.6.0, CUDA 10.1, CUDNN 7 的镜像。 +docker build -f ./docker/Dockerfile --rm -t mmcls:torch1.6.0-cuda10.1-cudnn7 . +``` + +```{important} +请确保已经安装了 [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). 
+``` + +运行一个基于上述镜像的容器: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/workspace/mmclassification/data mmcls:torch1.6.0-cuda10.1-cudnn7 /bin/bash +``` + +## 在多个 MMClassification 版本下进行开发 + +MMClassification 的训练和测试脚本已经修改了 `PYTHONPATH` 变量,以确保其能够运行当前目录下的 MMClassification。 + +如果想要运行环境下默认的 MMClassification,用户需要在训练和测试脚本中去除这一行: + +```shell +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH +``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/model_zoo.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/model_zoo.md new file mode 100644 index 0000000000..5055397660 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/model_zoo.md @@ -0,0 +1 @@ +../docs/model_zoo.md \ No newline at end of file diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/stat.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/stat.py new file mode 100644 index 0000000000..8968cbed73 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/stat.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +import functools as func +import glob +import os +import re +from pathlib import Path + +import numpy as np + +MMCLS_ROOT = Path(__file__).absolute().parents[1] +url_prefix = 'https://github.com/open-mmlab/mmclassification/blob/master/' + +papers_root = Path('papers') +papers_root.mkdir(exist_ok=True) +files = [Path(f) for f in sorted(glob.glob('../configs/*/README.md'))] + +stats = [] +titles = [] +num_ckpts = 0 +num_configs = 0 + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # Extract checkpoints + ckpts = set(x.lower().strip() + for x in re.findall(r'\[model\]\((https?.*)\)', content)) + if len(ckpts) == 0: + continue + num_ckpts += len(ckpts) + + # Extract paper title + title = content.split('\n')[0].replace('# ', '').strip() + titles.append(title) + + # Extract paper abbreviation + abbr = [x for x in re.findall(r'', content)] + abbr = abbr[0] if len(abbr) > 0 else title + + # Extract paper type + _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] + assert len(_papertype) > 0 + papertype = _papertype[0] + paper = set([(papertype, title)]) + + # Write a copy of README + copy = papers_root / (f.parent.name + '.md') + if copy.exists(): + os.remove(copy) + + def replace_link(matchobj): + # Replace relative link to GitHub link. + name = matchobj.group(1) + link = matchobj.group(2) + if not link.startswith('http') and (f.parent / link).exists(): + rel_link = (f.parent / link).absolute().relative_to(MMCLS_ROOT) + link = url_prefix + str(rel_link) + return f'[{name}]({link})' + + content = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', replace_link, content) + + with open(copy, 'w') as copy_file: + copy_file.write(content) + + statsmsg = f""" +\t* [{papertype}] [{title}]({copy}) ({len(ckpts)} ckpts) +""" + stats.append( + dict( + paper=paper, ckpts=ckpts, statsmsg=statsmsg, abbr=abbr, copy=copy)) + +allpapers = func.reduce(lambda a, b: a.union(b), + [stat['paper'] for stat in stats]) +msglist = '\n'.join(stat['statsmsg'] for stat in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# 模型库统计 + +* 论文数量: {len(set(titles))} +{countstr} + +* 模型权重文件数量: {num_ckpts} +{msglist} +""" + +with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) + +toctree = """ +.. 
toctree:: + :maxdepth: 1 + :caption: 模型库 + :glob: + + modelzoo_statistics.md + model_zoo.md +""" +with open('_model_zoo.rst', 'w') as f: + f.write(toctree) + for stat in stats: + f.write(f' {stat["abbr"]} <{stat["copy"]}>\n') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/model_serving.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/model_serving.md new file mode 100644 index 0000000000..de760f670f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/model_serving.md @@ -0,0 +1,87 @@ +# 模型部署至 TorchServe + +为了使用 [`TorchServe`](https://pytorch.org/serve/) 部署一个 `MMClassification` 模型,需要进行以下几步: + +## 1. 转换 MMClassification 模型至 TorchServe + +```shell +python tools/deployment/mmcls2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +```{note} +${MODEL_STORE} 需要是一个文件夹的绝对路径。 +``` + +示例: + +```shell +python tools/deployment/mmcls2torchserve.py \ + configs/resnet/resnet18_b32x8_imagenet.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + --output-folder ./checkpoints \ + --model-name resnet18_in1k +``` + +## 2. 构建 `mmcls-serve` docker 镜像 + +```shell +docker build -t mmcls-serve:latest docker/serve/ +``` + +## 3. 运行 `mmcls-serve` 镜像 + +请参考官方文档 [基于 docker 运行 TorchServe](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). + +为了使镜像能够使用 GPU 资源,需要安装 [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)。之后可以传递 `--gpus` 参数以在 GPU 上运。 + +示例: + +```shell +docker run --rm \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=`realpath ./checkpoints`,target=/home/model-server/model-store \ +mmcls-serve:latest +``` + +```{note} +`realpath ./checkpoints` 是 "./checkpoints" 的绝对路径,你可以将其替换为你保存 TorchServe 模型的目录的绝对路径。 +``` + +参考 [该文档](https://github.com/pytorch/serve/blob/master/docs/rest_api.md) 了解关于推理 (8080),管理 (8081) 和指标 (8082) 等 API 的信息。 + +## 4. 测试部署 + +```shell +curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T demo/demo.JPEG +``` + +您应该获得类似于以下内容的响应: + +```json +{ + "pred_label": 58, + "pred_score": 0.38102269172668457, + "pred_class": "water snake" +} +``` + +另外,你也可以使用 `test_torchserver.py` 来比较 TorchServe 和 PyTorch 的结果,并进行可视化。 + +```shell +python tools/deployment/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] +``` + +示例: + +```shell +python tools/deployment/test_torchserver.py \ + demo/demo.JPEG \ + configs/resnet/resnet18_b32x8_imagenet.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + resnet18_in1k +``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/onnx2tensorrt.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/onnx2tensorrt.md new file mode 100644 index 0000000000..a92f74274b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/onnx2tensorrt.md @@ -0,0 +1,76 @@ +# ONNX 转 TensorRT(试验性的) + + + +- [ONNX 转 TensorRT(试验性的)](#onnx-tensorrt) + - [如何将模型从 ONNX 转换到 TensorRT](#id1) + - [准备工作](#id2) + - [使用方法](#id3) + - [支持转换至 TensorRT 的模型列表](#tensorrt) + - [提示](#id4) + - [常见问题](#id5) + + + +## 如何将模型从 ONNX 转换到 TensorRT + +### 准备工作 + +1. 请参照 [安装指南](https://mmclassification.readthedocs.io/zh_CN/latest/install.html#mmclassification) 从源码安装 MMClassification。 +2. 
使用我们的工具 [pytorch2onnx.md](./pytorch2onnx.md) 将 PyTorch 模型转换至 ONNX。 + +### 使用方法 + +```bash +python tools/deployment/onnx2tensorrt.py \ + ${MODEL} \ + --trt-file ${TRT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --workspace-size {WORKSPACE_SIZE} \ + --show \ + --verify \ +``` + +所有参数的说明: + +- `model` : ONNX 模型的路径。 +- `--trt-file`: TensorRT 引擎文件的输出路径。如果没有指定,默认为当前脚本执行路径下的 `tmp.trt`。 +- `--shape`: 模型输入的高度和宽度。如果没有指定,默认为 `224 224`。 +- `--workspace-size` : 构建 TensorRT 引擎所需要的 GPU 空间大小,单位为 GiB。如果没有指定,默认为 `1` GiB。 +- `--show`: 是否展示模型的输出。如果没有指定,默认为 `False`。 +- `--verify`: 是否使用 ONNXRuntime 和 TensorRT 验证模型转换的正确性。如果没有指定,默认为`False`。 + +示例: + +```bash +python tools/deployment/onnx2tensorrt.py \ + checkpoints/resnet/resnet18_b16x8_cifar10.onnx \ + --trt-file checkpoints/resnet/resnet18_b16x8_cifar10.trt \ + --shape 224 224 \ + --show \ + --verify \ +``` + +## 支持转换至 TensorRT 的模型列表 + +下表列出了保证可转换为 TensorRT 的模型。 + +| 模型 | 配置文件 | 状态 | +| :----------: | :--------------------------------------------------------------------------: | :----: | +| MobileNetV2 | `configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py` | Y | +| ResNet | `configs/resnet/resnet18_b16x8_cifar10.py` | Y | +| ResNeXt | `configs/resnext/resnext50_32x4d_b32x8_imagenet.py` | Y | +| ShuffleNetV1 | `configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py` | Y | +| ShuffleNetV2 | `configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py` | Y | + +注: + +- *以上所有模型转换测试基于 Pytorch==1.6.0 和 TensorRT-7.2.1.6.Ubuntu-16.04.x86_64-gnu.cuda-10.2.cudnn8.0 进行* + +## 提示 + +- 如果你在上述模型的转换中遇到问题,请在 GitHub 中创建一个 issue,我们会尽快处理。未在上表中列出的模型,由于资源限制,我们可能无法提供很多帮助,如果遇到问题,请尝试自行解决。 + +## 常见问题 + +- 无 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2onnx.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2onnx.md new file mode 100644 index 0000000000..5cc95a74c2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2onnx.md @@ -0,0 +1,89 @@ +# Pytorch 转 ONNX (试验性的) + + + +- [Pytorch 转 ONNX (试验性的)](#pytorch-onnx) + - [如何将模型从 PyTorch 转换到 ONNX](#id1) + - [准备工作](#id2) + - [使用方法](#id3) + - [支持导出至 ONNX 的模型列表](#onnx) + - [提示](#id4) + - [常见问题](#id5) + + + +## 如何将模型从 PyTorch 转换到 ONNX + +### 准备工作 + +1. 请参照 [安装指南](https://mmclassification.readthedocs.io/zh_CN/latest/install.html#mmclassification) 从源码安装 MMClassification。 +2. 
安装 onnx 和 onnxruntime。 + + ```shell + pip install onnx onnxruntime==1.5.1 + ``` + +### 使用方法 + +```bash +python tools/deployment/pytorch2onnx.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${OUTPUT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --opset-version ${OPSET_VERSION} \ + --dynamic-shape \ + --show \ + --simplify \ + --verify \ +``` + +所有参数的说明: + +- `config` : 模型配置文件的路径。 +- `--checkpoint` : 模型权重文件的路径。 +- `--output-file`: ONNX 模型的输出路径。如果没有指定,默认为当前脚本执行路径下的 `tmp.onnx`。 +- `--shape`: 模型输入的高度和宽度。如果没有指定,默认为 `224 224`。 +- `--opset-version` : ONNX 的 opset 版本。如果没有指定,默认为 `11`。 +- `--dynamic-shape` : 是否以动态输入尺寸导出 ONNX。 如果没有指定,默认为 `False`。 +- `--show`: 是否打印导出模型的架构。如果没有指定,默认为 `False`。 +- `--simplify`: 是否精简导出的 ONNX 模型。如果没有指定,默认为 `False`。 +- `--verify`: 是否验证导出模型的正确性。如果没有指定,默认为`False`。 + +示例: + +```bash +python tools/deployment/pytorch2onnx.py \ + configs/resnet/resnet18_b16x8_cifar10.py \ + --checkpoint checkpoints/resnet/resnet18_b16x8_cifar10.pth \ + --output-file checkpoints/resnet/resnet18_b16x8_cifar10.onnx \ + --dynamic-shape \ + --show \ + --simplify \ + --verify \ +``` + +## 支持导出至 ONNX 的模型列表 + +下表列出了保证可导出至 ONNX,并在 ONNX Runtime 中运行的模型。 + +| 模型 | 配置文件 | 批推理 | 动态输入尺寸 | 备注 | +| :----------: | :--------------------------------------------------------------------------: | :-------------: | :-----------: | ---- | +| MobileNetV2 | `configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py` | Y | Y | | +| ResNet | `configs/resnet/resnet18_b16x8_cifar10.py` | Y | Y | | +| ResNeXt | `configs/resnext/resnext50_32x4d_b32x8_imagenet.py` | Y | Y | | +| SE-ResNet | `configs/seresnet/seresnet50_b32x8_imagenet.py` | Y | Y | | +| ShuffleNetV1 | `configs/shufflenet_v1/shufflenet_v1_1x_b64x16_linearlr_bn_nowd_imagenet.py` | Y | Y | | +| ShuffleNetV2 | `configs/shufflenet_v2/shufflenet_v2_1x_b64x16_linearlr_bn_nowd_imagenet.py` | Y | Y | | + +注: + +- *以上所有模型转换测试基于 Pytorch==1.6.0 进行* + +## 提示 + +- 如果你在上述模型的转换中遇到问题,请在 GitHub 中创建一个 issue,我们会尽快处理。未在上表中列出的模型,由于资源限制,我们可能无法提供很多帮助,如果遇到问题,请尝试自行解决。 + +## 常见问题 + +- 无 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2torchscript.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2torchscript.md new file mode 100644 index 0000000000..8ce68e04f3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/pytorch2torchscript.md @@ -0,0 +1,55 @@ +# Pytorch 转 TorchScript (试验性的) + + + +- [Pytorch 转 TorchScript (试验性的)](#pytorch-torchscript) + - [如何将 PyTorch 模型转换至 TorchScript](#id1) + - [使用方法](#id2) + - [提示](#id3) + - [常见问题](#id4) + + + +## 如何将 PyTorch 模型转换至 TorchScript + +### 使用方法 + +```bash +python tools/deployment/pytorch2torchscript.py \ + ${CONFIG_FILE} \ + --checkpoint ${CHECKPOINT_FILE} \ + --output-file ${OUTPUT_FILE} \ + --shape ${IMAGE_SHAPE} \ + --verify \ +``` + +所有参数的说明: + +- `config` : 模型配置文件的路径。 +- `--checkpoint` : 模型权重文件的路径。 +- `--output-file`: TorchScript 模型的输出路径。如果没有指定,默认为当前脚本执行路径下的 `tmp.pt`。 +- `--shape`: 模型输入的高度和宽度。如果没有指定,默认为 `224 224`。 +- `--verify`: 是否验证导出模型的正确性。如果没有指定,默认为`False`。 + +示例: + +```bash +python tools/deployment/pytorch2onnx.py \ + configs/resnet/resnet18_b16x8_cifar10.py \ + --checkpoint checkpoints/resnet/resnet18_b16x8_cifar10.pth \ + --output-file checkpoints/resnet/resnet18_b16x8_cifar10.pt \ + --verify \ +``` + +注: + +- *所有模型基于 Pytorch==1.8.1 通过了转换测试* + +## 提示 + +- 由于 `torch.jit.is_tracing()` 只在 PyTorch 1.6 之后的版本中得到支持,对于 PyTorch 1.3-1.5 的用户,我们建议手动提前返回结果。 +- 如果你在本仓库的模型转换中遇到问题,请在 GitHub 中创建一个 issue,我们会尽快处理。 + +## 常见问题 + 
+- 无 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/visualization.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/visualization.md new file mode 100644 index 0000000000..2825f3db0a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tools/visualization.md @@ -0,0 +1,82 @@ +# 可视化 + + + +- [可视化](#可视化) + - [数据流水线可视化](#数据流水线可视化) + - [使用方法](#使用方法) + - [常见问题](#常见问题) + + + +## 数据流水线可视化 + +### 使用方法 + +```bash +python tools/visualizations/vis_pipeline.py \ + ${CONFIG_FILE} \ + --output-dir ${OUTPUT_DIR} \ + --phase ${DATASET_PHASE} \ + --number ${BUNBER_IMAGES_DISPLAY} \ + --skip-type ${SKIP_TRANSFORM_TYPE} \ + --mode ${DISPLAY_MODE} \ + --show \ + --adaptive \ + --min-edge-length ${MIN_EDGE_LENGTH} \ + --max-edge-length ${MAX_EDGE_LENGTH} \ + --bgr2rgb \ + --window-size ${WINDOW_SIZE} +``` + +**所有参数的说明**: + +- `config` : 模型配置文件的路径。 +- `--output-dir`: 保存图片文件夹,如果没有指定,默认为 `''`,表示不保存。 +- `--phase`: 可视化数据集的阶段,只能为 `[train, val, test]` 之一,默认为 `train`。 +- `--number`: 可视化样本数量。如果没有指定,默认展示数据集的所有图片。 +- `--skip-type`: 预设跳过的数据流水线过程。如果没有指定,默认为 `['ToTensor', 'Normalize', 'ImageToTensor', 'Collect']`。 +- `--mode`: 可视化的模式,只能为 `[original, pipeline, concat]` 之一,如果没有指定,默认为 `concat`。 +- `--show`: 将可视化图片以弹窗形式展示。 +- `--adaptive`: 自动调节可视化图片的大小。 +- `--min-edge-length`: 最短边长度,当使用了 `--adaptive` 时有效。 当图片任意边小于 `${MIN_EDGE_LENGTH}` 时,会保持长宽比不变放大图片,短边对齐至 `${MIN_EDGE_LENGTH}`,默认为200。 +- `--max-edge-length`: 最长边长度,当使用了 `--adaptive` 时有效。 当图片任意边大于 `${MAX_EDGE_LENGTH}` 时,会保持长宽比不变缩小图片,短边对齐至 `${MAX_EDGE_LENGTH}`,默认为1000。 +- `--bgr2rgb`: 将图片的颜色通道翻转。 +- `--window-size`: 可视化窗口大小,如果没有指定,默认为 `12*7`。如果需要指定,按照格式 `'W*H'`。 + +```{note} + +1. 如果不指定 `--mode`,默认设置为 `concat`,获取原始图片和预处理后图片拼接的图片;如果 `--mode` 设置为 `original`,则获取原始图片; 如果 `--mode` 设置为 `pipeline`,则获取预处理后的图片。 + +2. 当指定了 `--adaptive` 选项时,会自动的调整尺寸过大和过小的图片,你可以通过设定 `--min-edge-length` 与 `--max-edge-length` 来指定自动调整的图片尺寸。 +``` + +**示例**: + +1. 可视化 `ImageNet` 训练集的所有经过预处理的图片,并以弹窗形式显示: + +```shell +python ./tools/visualizations/vis_pipeline.py ./configs/resnet/resnet50_b32x8_imagenet.py --show --mode pipeline +``` + +
+ +2. 可视化 `ImageNet` 训练集的10张原始图片与预处理后图片对比图,保存在 `./tmp` 文件夹下: + +```shell +python ./tools/visualizations/vis_pipeline.py configs/swin_transformer/swin_base_224_b16x64_300e_imagenet.py --phase train --output-dir tmp --number 10 --adaptive +``` + +
+ +3. 可视化 `CIFAR100` 验证集中的100张原始图片,显示并保存在 `./tmp` 文件夹下: + +```shell +python ./tools/visualizations/vis_pipeline.py configs/resnet/resnet50_b16x8_cifar100.py --phase val --output-dir tmp --mode original --number 100 --show --adaptive --bgr2rgb +``` + +
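+
+4. 可视化 `ImageNet` 验证集的 20 张对比图,并以 `16*9` 的窗口大小弹窗显示。这是一个补充示意,各参数的含义见上文的参数说明,具体取值可按需调整:
+
+```shell
+python ./tools/visualizations/vis_pipeline.py ./configs/resnet/resnet50_b32x8_imagenet.py --phase val --number 20 --mode concat --show --adaptive --window-size '16*9'
+```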
+ +## 常见问题 + +- 无 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/config.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/config.md new file mode 100644 index 0000000000..9804c191bd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/config.md @@ -0,0 +1,405 @@ +# 教程 1:如何编写配置文件 + +MMClassification 主要使用 python 文件作为配置文件。其配置文件系统的设计将模块化与继承整合进来,方便用户进行各种实验。所有配置文件都放置在 `configs` 文件夹下,主要包含 `_base_` 原始配置文件夹 以及 `resnet`, `swin_transformer`,`vision_transformer` 等诸多算法文件夹。 + +可以使用 ```python tools/misc/print_config.py /PATH/TO/CONFIG``` 命令来查看完整的配置信息,从而方便检查所对应的配置文件。 + + + +- [配置文件以及权重命名规则](#配置文件以及权重命名规则) +- [配置文件结构](#配置文件结构) +- [继承并修改配置文件](#继承并修改配置文件) + - [使用配置文件里的中间变量](#使用配置文件里的中间变量) + - [忽略基础配置文件里的部分内容](#忽略基础配置文件里的部分内容) + - [引用基础配置文件里的变量](#引用基础配置文件里的变量) +- [通过命令行参数修改配置信息](#通过命令行参数修改配置信息) +- [导入用户自定义模块](#导入用户自定义模块) +- [常见问题](#常见问题) + + + +## 配置文件以及权重命名规则 + +MMClassification 按照以下风格进行配置文件命名,代码库的贡献者需要遵循相同的命名规则。文件名总体分为四部分:算法信息,模块信息,训练信息和数据信息。逻辑上属于不同部分的单词之间用下划线 `'_'` 连接,同一部分有多个单词用短横线 `'-'` 连接。 + +``` +{algorithm info}_{module info}_{training info}_{data info}.py +``` + +- `algorithm info`:算法信息,算法名称或者网络架构,如 resnet 等; +- `module info`: 模块信息,因任务而异,用以表示一些特殊的 neck、head 和 pretrain 信息; +- `training info`:一些训练信息,训练策略设置,包括 batch size,schedule 数据增强等; +- `data info`:数据信息,数据集名称、模态、输入尺寸等,如 imagenet, cifar 等; + +### 算法信息 +指论文中的算法名称缩写,以及相应的分支架构信息。例如: +- `resnet50` +- `mobilenet-v3-large` +- `vit-small-patch32` : `patch32` 表示 `ViT` 切分的分块大小 +- `seresnext101-32x4d` : `SeResNet101` 基本网络结构,`32x4d` 表示在 `Bottleneck` 中 `groups` 和 `width_per_group` 分别为32和4 + +### 模块信息 +指一些特殊的 `neck` 、`head` 或者 `pretrain` 的信息, 在分类中常见为预训练信息,比如: +- `in21k-pre` : 在 `ImageNet21k` 上预训练 +- `in21k-pre-3rd-party` : 在 `ImageNet21k` 上预训练,其权重来自其他仓库 + +### 训练信息 +训练策略的一些设置,包括训练类型、 `batch size`、 `lr schedule`、 数据增强以及特殊的损失函数等等,比如: +Batch size 信息: +- 格式为`{gpu x batch_per_gpu}`, 如 `8xb32` + +训练类型(主要见于 transformer 网络,如 `ViT` 算法,这类算法通常分为预训练和微调两种模式): +- `ft` : Finetune config,用于微调的配置文件 +- `pt` : Pretrain config,用于预训练的配置文件 + +训练策略信息,训练策略以复现配置文件为基础,此基础不必标注训练策略。但如果在此基础上进行改进,则需注明训练策略,按照应用点位顺序排列,如:`{pipeline aug}-{train aug}-{loss trick}-{scheduler}-{epochs}` +- `coslr-200e` : 使用 cosine scheduler, 训练 200 个 epoch +- `autoaug-mixup-lbs-coslr-50e` : 使用了 `autoaug`、`mixup`、`label smooth`、`cosine scheduler`, 训练了 50 个轮次 + +### 数据信息 +- `in1k` : `ImageNet1k` 数据集,默认使用 `224x224` 大小的图片 +- `in21k` : `ImageNet21k` 数据集,有些地方也称为 `ImageNet22k` 数据集,默认使用 `224x224` 大小的图片 +- `in1k-384px` : 表示训练的输出图片大小为 `384x384` +- `cifar100` + +### 配置文件命名案例: + +``` +repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py +``` + +- `repvgg-D2se`: 算法信息 + + `repvgg`: 主要算法名称。 + + `D2se`: 模型的结构。 +- `deploy`:模块信息,该模型为推理状态。 +- `4xb64-autoaug-lbs-mixup-coslr-200e`: 训练信息 + + `4xb64`: 使用4块 GPU 并且 每块 GPU 的批大小为64。 + + `autoaug`: 使用 `AutoAugment` 数据增强方法。 + + `lbs`: 使用 `label smoothing` 损失函数。 + + `mixup`: 使用 `mixup` 训练增强方法。 + + `coslr`: 使用 `cosine scheduler` 优化策略。 + + `200e`: 训练 200 轮次。 +- `in1k`: 数据信息。 配置文件用于 `ImageNet1k` 数据集上使用 `224x224` 大小图片训练。 + +```{note} +部分配置文件目前还没有遵循此命名规范,相关文件命名近期会更新。 +``` + +### 权重命名规则 + +权重的命名主要包括配置文件名,日期和哈希值。 + +``` +{config_name}_{date}-{hash}.pth +``` + + +## 配置文件结构 + +在 `configs/_base_` 文件夹下有 4 个基本组件类型,分别是: + +- [模型(model)](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/models) +- [数据(data)](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/datasets) +- 
[训练策略(schedule)](https://github.com/open-mmlab/mmclassification/tree/master/configs/_base_/schedules) +- [运行设置(runtime)](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/default_runtime.py) + +你可以通过继承一些基本配置文件轻松构建自己的训练配置文件。由来自`_base_` 的组件组成的配置称为 _primitive_。 + +为了帮助用户对 MMClassification 检测系统中的完整配置和模块有一个基本的了解,我们使用 [ResNet50 原始配置文件](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet50_b32x8_imagenet.py) 作为案例进行说明并注释每一行含义。更详细的用法和各个模块对应的替代方案,请参考 API 文档。 + +```python +_base_ = [ + '../_base_/models/resnet50.py', # 模型 + '../_base_/datasets/imagenet_bs32.py', # 数据 + '../_base_/schedules/imagenet_bs256.py', # 训练策略 + '../_base_/default_runtime.py' # 默认运行设置 +] +``` + +下面对这四个部分分别进行说明,仍然以上述 ResNet50 原始配置文件作为案例。 + + +### 模型 + +模型参数 `model` 在配置文件中为一个 `python` 字典,主要包括网络结构、损失函数等信息: +- `type` : 分类器名称, 目前 MMClassification 只支持 `ImageClassifier`, 参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api.html#module-mmcls.models.classifiers)。 +- `backbone` : 主干网类型,可用选项参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api.html#module-mmcls.models.backbones)。 +- `neck` : 颈网络类型,目前 MMClassification 只支持 `GlobalAveragePooling`, 参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api.html#module-mmcls.models.necks)。 +- `head` : 头网络类型, 包括单标签分类与多标签分类头网络,可用选项参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api.html#module-mmcls.models.heads)。 + - `loss` : 损失函数类型, 支持 `CrossEntropyLoss`, [`LabelSmoothLoss`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_label_smooth.py) 等,可用选项参考 [API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api.html#module-mmcls.models.losses)。 +- `train_cfg` :训练配置, 支持 [`mixup`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_mixup.py), [`cutmix`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/models/resnet50_cutmix.py) 等训练增强。 + +```{note} +配置文件中的 'type' 不是构造时的参数,而是类名。 +``` + +```python +model = dict( + type='ImageClassifier', # 分类器类型 + backbone=dict( + type='ResNet', # 主干网络类型 + depth=50, # 主干网网络深度, ResNet 一般有18, 34, 50, 101, 152 可以选择 + num_stages=4, # 主干网络状态(stages)的数目,这些状态产生的特征图作为后续的 head 的输入。 + out_indices=(3, ), # 输出的特征图输出索引。越远离输入图像,索引越大 + frozen_stages=-1, # 网络微调时,冻结网络的stage(训练时不执行反相传播算法),若num_stages=4,backbone包含stem 与 4 个 stages。frozen_stages为-1时,不冻结网络; 为0时,冻结 stem; 为1时,冻结 stem 和 stage1; 为4时,冻结整个backbone + style='pytorch'), # 主干网络的风格,'pytorch' 意思是步长为2的层为 3x3 卷积, 'caffe' 意思是步长为2的层为 1x1 卷积。 + neck=dict(type='GlobalAveragePooling'), # 颈网络类型 + head=dict( + type='LinearClsHead', # 线性分类头, + num_classes=1000, # 输出类别数,这与数据集的类别数一致 + in_channels=2048, # 输入通道数,这与 neck 的输出通道一致 + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), # 损失函数配置信息 + topk=(1, 5), # 评估指标,Top-k 准确率, 这里为 top1 与 top5 准确率 + )) +``` + +### 数据 +数据参数 `data` 在配置文件中为一个 `python` 字典,主要包含构造数据集加载器(dataloader)配置信息: +- `samples_per_gpu` : 构建 dataloader 时,每个 GPU 的 Batch Size +- `workers_per_gpu` : 构建 dataloader 时,每个 GPU 的 线程数 +- `train | val | test` : 构造数据集 + - `type` : 数据集类型, MMClassification 支持 `ImageNet`、 `Cifar` 等 ,参考[API 文档](https://mmclassification.readthedocs.io/zh_CN/latest/api.html#module-mmcls.datasets) + - `data_prefix` : 数据集根目录 + - `pipeline` : 数据处理流水线,参考相关教程文档 [如何设计数据处理流水线](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/data_pipeline.html) + +评估参数 `evaluation` 也是一个字典, 为 `evaluation hook` 的配置信息, 主要包括评估间隔、评估指标等。 + +```python +# dataset settings +dataset_type = 'ImageNet' # 数据集名称, +img_norm_cfg 
= dict( #图像归一化配置,用来归一化输入的图像。 + mean=[123.675, 116.28, 103.53], # 预训练里用于预训练主干网络模型的平均值。 + std=[58.395, 57.12, 57.375], # 预训练里用于预训练主干网络模型的标准差。 + to_rgb=True) # 是否反转通道,使用 cv2, mmcv 读取图片默认为 BGR 通道顺序,这里 Normalize 均值方差数组的数值是以 RGB 通道顺序, 因此需要反转通道顺序。 +# 训练数据流水线 +train_pipeline = [ + dict(type='LoadImageFromFile'), # 读取图片 + dict(type='RandomResizedCrop', size=224), # 随机缩放抠图 + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), # 以概率为0.5随机水平翻转图片 + dict(type='Normalize', **img_norm_cfg), # 归一化 + dict(type='ImageToTensor', keys=['img']), # image 转为 torch.Tensor + dict(type='ToTensor', keys=['gt_label']), # gt_label 转为 torch.Tensor + dict(type='Collect', keys=['img', 'gt_label']) # 决定数据中哪些键应该传递给检测器的流程 +] +# 测试数据流水线 +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=(256, -1)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) # test 时不传递 gt_label +] +data = dict( + samples_per_gpu=32, # 单个 GPU 的 Batch size + workers_per_gpu=2, # 单个 GPU 的 线程数 + train=dict( # 训练数据信息 + type=dataset_type, # 数据集名称 + data_prefix='data/imagenet/train', # 数据集目录,当不存在 ann_file 时,类别信息从文件夹自动获取 + pipeline=train_pipeline), # 数据集需要经过的 数据流水线 + val=dict( # 验证数据集信息 + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', # 标注文件路径,存在 ann_file 时,不通过文件夹自动获取类别信息 + pipeline=test_pipeline), + test=dict( # 测试数据集信息 + type=dataset_type, + data_prefix='data/imagenet/val', + ann_file='data/imagenet/meta/val.txt', + pipeline=test_pipeline)) +evaluation = dict( # evaluation hook 的配置 + interval=1, # 验证期间的间隔,单位为 epoch 或者 iter, 取决于 runner 类型。 + metric='accuracy') # 验证期间使用的指标。 +``` + +### 训练策略 +主要包含 优化器设置、 `optimizer hook` 设置、学习率策略和 `runner`设置: +- `optimizer` : 优化器设置信息, 支持 `pytorch` 所有的优化器,参考相关 [mmcv](https://mmcv.readthedocs.io/zh_CN/latest/_modules/mmcv/runner/optimizer/default_constructor.html#DefaultOptimizerConstructor) 文档 +- `optimizer_config` : `optimizer hook` 的配置文件,如设置梯度限制,参考相关 [mmcv](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8) 代码 +- `lr_config` : 学习率策略,支持 "CosineAnnealing"、 "Step"、 "Cyclic" 等等,参考相关 [mmcv](https://mmcv.readthedocs.io/zh_CN/latest/_modules/mmcv/runner/hooks/lr_updater.html#LrUpdaterHook) 文档 +- `runner` : 有关 `runner` 可以参考 `mmcv` 对于 [`runner`](https://mmcv.readthedocs.io/zh_CN/latest/understand_mmcv/runner.html) 介绍文档 +```python +# 用于构建优化器的配置文件。支持 PyTorch 中的所有优化器,同时它们的参数与 PyTorch 里的优化器参数一致。 +optimizer = dict(type='SGD', # 优化器类型 + lr=0.1, # 优化器的学习率,参数的使用细节请参照对应的 PyTorch 文档。 + momentum=0.9, # 动量(Momentum) + weight_decay=0.0001) # 权重衰减系数(weight decay)。 + # optimizer hook 的配置文件 +optimizer_config = dict(grad_clip=None) # 大多数方法不使用梯度限制(grad_clip)。 +# 学习率调整配置,用于注册 LrUpdater hook。 +lr_config = dict(policy='step', # 调度流程(scheduler)的策略,也支持 CosineAnnealing, Cyclic, 等。 + step=[30, 60, 90]) # 在 epoch 为 30, 60, 90 时, lr 进行衰减 +runner = dict(type='EpochBasedRunner', # 将使用的 runner 的类别,如 IterBasedRunner 或 EpochBasedRunner。 + max_epochs=100) # runner 总回合数, 对于 IterBasedRunner 使用 `max_iters` +``` + +### 运行设置 + +本部分主要包括保存权重策略、日志配置、训练参数、断点权重路径和工作目录等等。 + +```python +# Checkpoint hook 的配置文件。 +checkpoint_config = dict(interval=1) # 保存的间隔是 1,单位会根据 runner 不同变动,可以为 epoch 或者 iter。 +# 日志配置信息。 +log_config = dict( + interval=100, # 打印日志的间隔, 单位 iters + hooks=[ + dict(type='TextLoggerHook'), # 用于记录训练过程的文本记录器(logger)。 + # dict(type='TensorboardLoggerHook') # 同样支持 Tensorboard 日志 + ]) + +dist_params = dict(backend='nccl') # 
用于设置分布式训练的参数,端口也同样可被设置。 +log_level = 'INFO' # 日志的输出级别。 +resume_from = None # 从给定路径里恢复检查点(checkpoints),训练模式将从检查点保存的轮次开始恢复训练。 +workflow = [('train', 1)] # runner 的工作流程,[('train', 1)] 表示只有一个工作流且工作流仅执行一次。 +work_dir = 'work_dir' # 用于保存当前实验的模型检查点和日志的目录文件地址。 +``` + +## 继承并修改配置文件 + +为了精简代码、更快的修改配置文件以及便于理解,我们建议继承现有方法。 + +对于在同一算法文件夹下的所有配置文件,MMClassification 推荐只存在 **一个** 对应的 _原始配置_ 文件。 +所有其他的配置文件都应该继承 _原始配置_ 文件,这样就能保证配置文件的最大继承深度为 3。 + +例如,如果在 ResNet 的基础上做了一些修改,用户首先可以通过指定 `_base_ = '../../configs/resnet/resnet50_b32x8_imagenet.py'` 来继承基础的 ResNet 结构、数据集以及其他训练配置信息,然后修改配置文件中的必要参数以完成继承。如想在基础 resnet50 的基础上将训练轮数由 100 改为 300 和修改学习率衰减轮数,同时修改数据集路径,可以建立新的配置文件 `configs/resnet/resnet50_8xb32-300e_in1k.py`, 文件中写入以下内容: + +```python +_base_ = '../../configs/resnet/resnet50_b32x8_imagenet.py' + +runner = dict(max_epochs=300) +lr_config = dict(step=[150, 200, 250]) + +data = dict( + train=dict(data_prefix='mydata/imagenet/train'), + val=dict(data_prefix='mydata/imagenet/train', ), + test=dict(data_prefix='mydata/imagenet/train', ) +) +``` + +### 使用配置文件里的中间变量 + +用一些中间变量,中间变量让配置文件更加清晰,也更容易修改。 + +例如数据集里的 `train_pipeline` / `test_pipeline` 是作为数据流水线的中间变量。我们首先要定义 `train_pipeline` / `test_pipeline`,然后将它们传递到 `data` 中。如果想修改训练或测试时输入图片的大小,就需要修改 `train_pipeline` / `test_pipeline` 这些中间变量。 + + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=384, backend='pillow',), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=384, backend='pillow'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +``` + +### 忽略基础配置文件里的部分内容 + +有时,您需要设置 `_delete_=True` 去忽略基础配置文件里的一些域内容。 可以参照 [mmcv](https://mmcv.readthedocs.io/zh_CN/latest/understand_mmcv/config.html#inherit-from-base-config-with-ignored-fields) 来获得一些简单的指导。 + + +以下是一个简单应用案例。 如果在上述 ResNet50 案例中 使用 cosine schedule ,使用继承并直接修改会报 `get unexcepected keyword 'step'` 错, 因为基础配置文件 lr_config 域信息的 `'step'` 字段被保留下来了,需要加入 `_delete_=True` 去忽略基础配置文件里的 `lr_config` 相关域内容: + +```python +_base_ = '../../configs/resnet/resnet50_b32x8_imagenet.py' + +lr_config = dict( + _delete_=True, + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + by_epoch=True, + warmup_iters=5, + warmup_ratio=0.1 +) +``` + +### 引用基础配置文件里的变量 + +有时,您可以引用 `_base_` 配置信息的一些域内容,这样可以避免重复定义。 可以参照 [mmcv](https://mmcv.readthedocs.io/zh_CN/latest/understand_mmcv/config.html#reference-variables-from-base) 来获得一些简单的指导。 + +以下是一个简单应用案例,在训练数据预处理流水线中使用 auto augment 数据增强,参考配置文件 [`configs/_base_/datasets/imagenet_bs64_autoaug.py`](https://github.com/open-mmlab/mmclassification/blob/master/configs/_base_/datasets/imagenet_bs64_autoaug.py)。 在定义 `train_pipeline` 时,可以直接在 `_base_` 中加入定义 auto augment 数据增强的文件命名,再通过 `{{_base_.auto_increasing_policies}}` 引用变量: + +```python +_base_ = ['./pipelines/auto_aug.py'] + +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', 
size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='AutoAugment', policies={{_base_.auto_increasing_policies}}), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [...] +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + train=dict(..., pipeline=train_pipeline), + val=dict(..., pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='accuracy') +``` + +## 通过命令行参数修改配置信息 + +当用户使用脚本 "tools/train.py" 或者 "tools/test.py" 提交任务,以及使用一些工具脚本时,可以通过指定 `--cfg-options` 参数来直接修改所使用的配置文件内容。 + +- 更新配置文件内的字典 + + 可以按照原始配置文件中字典的键的顺序指定配置选项。 + 例如,`--cfg-options model.backbone.norm_eval=False` 将主干网络中的所有 BN 模块更改为 `train` 模式。 + +- 更新配置文件内列表的键 + + 一些配置字典在配置文件中会形成一个列表。例如,训练流水线 `data.train.pipeline` 通常是一个列表。 + 例如,`[dict(type='LoadImageFromFile'), dict(type='TopDownRandomFlip', flip_prob=0.5), ...]` 。如果要将流水线中的 `'flip_prob=0.5'` 更改为 `'flip_prob=0.0'`,您可以这样指定 `--cfg-options data.train.pipeline.1.flip_prob=0.0` 。 + +- 更新列表/元组的值。 + + 当配置文件中需要更新的是一个列表或者元组,例如,配置文件通常会设置 `workflow=[('train', 1)]`,用户如果想更改, + 需要指定 `--cfg-options workflow="[(train,1),(val,1)]"`。注意这里的引号 " 对于列表以及元组数据类型的修改是必要的, + 并且 **不允许** 引号内所指定的值的书写存在空格。 + +## 导入用户自定义模块 + +```{note} +本部分仅在当将 MMClassification 当作库构建自己项目时可能用到,初学者可跳过。 +``` + +在学习完后续教程 [如何添加新数据集](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/new_dataset.html)、[如何设计数据处理流程](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/data_pipeline.html) 、[如何增加新模块](https://mmclassification.readthedocs.io/zh_CN/latest/tutorials/new_modules.html) 后,您可能使用 MMClassification 完成自己的项目并在项目中自定义了数据集、模型、数据增强等。为了精简代码,可以将 MMClassification 作为一个第三方库,只需要保留自己的额外的代码,并在配置文件中导入自定义的模块。案例可以参考 [OpenMMLab 算法大赛项目](https://github.com/zhangrui-wolf/openmmlab-competition-2021)。 + +只需要在你的配置文件中添加以下代码: + +```python +custom_imports = dict( + imports=['your_dataset_class', + 'your_transforme_class', + 'your_model_class', + 'your_module_class'], + allow_failed_imports=False) +``` + +## 常见问题 +- 无 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/data_pipeline.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/data_pipeline.md new file mode 100644 index 0000000000..e50a2c0b84 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/data_pipeline.md @@ -0,0 +1,148 @@ +# 教程 4:如何设计数据处理流程 + +## 设计数据流水线 + +按照典型的用法,我们通过 `Dataset` 和 `DataLoader` 来使用多个 worker 进行数据加 +载。对 `Dataset` 的索引操作将返回一个与模型的 `forward` 方法的参数相对应的字典。 + +数据流水线和数据集在这里是解耦的。通常,数据集定义如何处理标注文件,而数据流水 +线定义所有准备数据字典的步骤。流水线由一系列操作组成。每个操作都将一个字典作为 +输入,并输出一个字典。 + +这些操作分为数据加载,预处理和格式化。 + +这里使用 ResNet-50 在 ImageNet 数据集上的数据流水线作为示例。 + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', size=256), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] +``` + 
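+
+上面的列表只是配置;在运行时,这些字典会按 `type` 被构建成对应的数据变换,并依次作用于同一个数据字典。下面给出一个简化的示意(假设使用 `mmcls.datasets.pipelines.Compose` 组合流水线,示例中的图像路径仅作说明,实际运行时需指向真实存在的文件):
+
+```python
+import numpy as np
+
+from mmcls.datasets.pipelines import Compose
+
+# 将上面的 train_pipeline 配置列表构建为可调用的流水线
+pipeline = Compose(train_pipeline)
+
+# 每个操作接收一个字典作为输入,并返回一个字典
+results = dict(
+    img_prefix='data/imagenet/train',                # 数据集根目录(示意)
+    img_info=dict(filename='n01440764/xxx.JPEG'),    # 图像相对路径(示意)
+    gt_label=np.array(0, dtype=np.int64))            # 真实标签(示意)
+results = pipeline(results)
+
+# 经过最后的 Collect 之后,输出字典主要只包含 'img'、'gt_label' 等 forward 所需的键
+print(results.keys())
+```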
+对于每个操作,我们列出了添加、更新、删除的相关字典字段。在流水线的最后,我们使 +用 `Collect` 仅保留进行模型 `forward` 方法所需的项。 + +### 数据加载 + +`LoadImageFromFile` - 从文件中加载图像 + +- 添加:img, img_shape, ori_shape + +默认情况下,`LoadImageFromFile` 将会直接从硬盘加载图像,但对于一些效率较高、规 +模较小的模型,这可能会导致 IO 瓶颈。MMCV 支持多种数据加载后端来加速这一过程。例 +如,如果训练设备上配置了 [memcached](https://memcached.org/),那么我们按照如下 +方式修改配置文件。 + +``` +memcached_root = '/mnt/xxx/memcached_client/' +train_pipeline = [ + dict( + type='LoadImageFromFile', + file_client_args=dict( + backend='memcached', + server_list_cfg=osp.join(memcached_root, 'server_list.conf'), + client_cfg=osp.join(memcached_root, 'client.conf'))), +] +``` + +更多支持的数据加载后端,可以参见 [mmcv.fileio.FileClient](https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py)。 + +### 预处理 + +`Resize` - 缩放图像尺寸 + +- 添加:scale, scale_idx, pad_shape, scale_factor, keep_ratio +- 更新:img, img_shape + +`RandomFlip` - 随机翻转图像 + +- 添加:flip, flip_direction +- 更新:img + +`RandomCrop` - 随机裁剪图像 + +- 更新:img, pad_shape + +`Normalize` - 图像数据归一化 + +- 添加:img_norm_cfg +- 更新:img + +### 格式化 + +`ToTensor` - 转换(标签)数据至 `torch.Tensor` + +- 更新:根据参数 `keys` 指定 + +`ImageToTensor` - 转换图像数据至 `torch.Tensor` + +- 更新:根据参数 `keys` 指定 + +`Collect` - 保留指定键值 + +- 删除:除了参数 `keys` 指定以外的所有键值对 + +## 扩展及使用自定义流水线 + +1. 编写一个新的数据处理操作,并放置在 `mmcls/datasets/pipelines/` 目录下的任何 + 一个文件中,例如 `my_pipeline.py`。这个类需要重载 `__call__` 方法,接受一个 + 字典作为输入,并返回一个字典。 + + ```python + from mmcls.datasets import PIPELINES + + @PIPELINES.register_module() + class MyTransform(object): + + def __call__(self, results): + # 对 results['img'] 进行变换操作 + return results + ``` + +2. 在 `mmcls/datasets/pipelines/__init__.py` 中导入这个新的类。 + + ```python + ... + from .my_pipeline import MyTransform + + __all__ = [ + ..., 'MyTransform' + ] + ``` + +3. 在数据流水线的配置中添加这一操作。 + + ```python + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='MyTransform'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) + ] + ``` + +## 流水线可视化 + +设计好数据流水线后,可以使用[可视化工具](../tools/visualization.md)查看具体的效果。 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/finetune.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/finetune.md new file mode 100644 index 0000000000..2901506587 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/finetune.md @@ -0,0 +1,222 @@ +# 教程 2:如何微调模型 + +已经证明,在 ImageNet 数据集上预先训练的分类模型对于其他数据集和其他下游任务有很好的效果。 + +该教程提供了如何将 [Model Zoo](https://github.com/open-mmlab/mmclassification/blob/master/docs/model_zoo.md) 中提供的预训练模型用于其他数据集,已获得更好的效果。 + +在新数据集上微调模型分为两步: + +- 按照 [教程 2:如何增加新数据集](new_dataset.md) 添加对新数据集的支持。 +- 按照本教程中讨论的内容修改配置文件 + +假设我们现在有一个在 ImageNet-2012 数据集上训练好的 ResNet-50 模型,并且希望在 +CIFAR-10 数据集上进行模型微调,我们需要修改配置文件中的五个部分。 + +## 继承基础配置 + +首先,创建一个新的配置文件 `configs/tutorial/resnet50_finetune_cifar.py` 来保存我们的配置,当然,这个文件名可以自由设定。 + +为了重用不同配置之间的通用部分,我们支持从多个现有配置中继承配置。要微调 +ResNet-50 模型,新配置需要继承 `_base_/models/resnet50.py` 来搭建模型的基本结构。 +为了使用 CIFAR10 数据集,新的配置文件可以直接继承 `_base_/datasets/cifar10.py`。 +而为了保留运行相关设置,比如训练调整器,新的配置文件需要继承 +`_base_/default_runtime.py`。 + +要继承以上这些配置文件,只需要把下面一段代码放在我们的配置文件开头。 + +```python +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/cifar10.py', 
'../_base_/default_runtime.py' +] +``` + +除此之外,你也可以不使用继承,直接编写完整的配置文件,例如 +[`configs/lenet/lenet5_mnist.py`](https://github.com/open-mmlab/mmclassification/blob/master/configs/lenet/lenet5_mnist.py)。 + +## 修改模型 + +在进行模型微调是,我们通常希望在主干网络(backbone)加载预训练模型,再用我们的数据集训练一个新的分类头(head)。 + +为了在主干网络加载预训练模型,我们需要修改主干网络的初始化设置,使用 +`Pretrained` 类型的初始化函数。另外,在初始化设置中,我们使用 +`prefix='backbone'` 来告诉初始化函数移除权重文件中键值名称的前缀,比如把 +`backbone.conv1` 变成 `conv1`。方便起见,我们这里使用一个在线的权重文件链接,它 +会在训练前自动下载对应的文件,你也可以提前下载这个模型,然后使用本地路径。 + +接下来,新的配置文件需要按照新数据集的类别数目来修改分类头的配置。只需要修改分 +类头中的 `num_classes` 设置即可。 + +```python +model = dict( + backbone=dict( + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +``` + +```{tip} +这里我们只需要设定我们想要修改的部分配置,其他配置将会自动从我们的父配置文件中获取。 +``` + +另外,有时我们在进行微调时会希望冻结主干网络前面几层的参数,这么做有助于在后续 +训练中,保持网络从预训练权重中获得的提取低阶特征的能力。在 MMClassification 中, +这一功能可以通过简单的一个 `frozen_stages` 参数来实现。比如我们需要冻结前两层网 +络的参数,只需要在上面的配置中添加一行: + +```python +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +``` + +```{note} +目前还不是所有的网络都支持 `frozen_stages` 参数,在使用之前,请先检查 +[文档](https://mmclassification.readthedocs.io/zh_CN/latest/api.html#module-mmcls.models.backbones) +以确认你所使用的主干网络是否支持。 +``` + +## 修改数据集 + +当针对一个新的数据集进行微调时,我们通常都需要修改一些数据集相关的配置。比如这 +里,我们就需要把 CIFAR-10 数据集中的图像大小从 32 缩放到 224 来配合 ImageNet 上 +预训练模型的输入。这一需要可以通过修改数据集的预处理流水线(pipeline)来实现。 + +```python +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False, +) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) +``` + +## 修改训练策略设置 + +用于微调任务的超参数与默认配置不同,通常只需要较小的学习率和较少的训练时间。 + +```python +# 用于批大小为 128 的优化器学习率 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# 学习率衰减策略 +lr_config = dict(policy='step', step=[15]) +runner = dict(type='EpochBasedRunner', max_epochs=200) +log_config = dict(interval=100) +``` + +## 开始训练 + +现在,我们完成了用于微调的配置文件,完整的文件如下: + +```python +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/cifar10_bs16.py', '../_base_/default_runtime.py' +] + +# 模型设置 +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# 数据集设置 +img_norm_cfg = dict( + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + to_rgb=False, +) +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Resize', size=224), + 
dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']), +] +test_pipeline = [ + dict(type='Resize', size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) + +# 训练策略设置 +# 用于批大小为 128 的优化器学习率 +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# 学习率衰减策略 +lr_config = dict(policy='step', step=[15]) +runner = dict(type='EpochBasedRunner', max_epochs=200) +log_config = dict(interval=100) +``` + +接下来,我们使用一台 8 张 GPU 的电脑来训练我们的模型,指令如下: + +```shell +bash tools/dist_train.sh configs/tutorial/resnet50_finetune_cifar.py 8 +``` + +当然,我们也可以使用单张 GPU 来进行训练,使用如下命令: + +```shell +python tools/train.py configs/tutorial/resnet50_finetune_cifar.py +``` + +但是如果我们使用单张 GPU 进行训练的话,需要在数据集设置部分作如下修改: + +```python +data = dict( + samples_per_gpu=128, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline), +) +``` + +这是因为我们的训练策略是针对批次大小(batch size)为 128 设置的。在父配置文件中, +设置了 `samples_per_gpu=16`,如果使用 8 张 GPU,总的批次大小就是 128。而如果使 +用单张 GPU,就必须手动修改 `samples_per_gpu=128` 来匹配训练策略。 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_dataset.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_dataset.md new file mode 100644 index 0000000000..52123240b9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_dataset.md @@ -0,0 +1,140 @@ +# 教程 3:如何添加新数据集 + +## 通过重新组织数据来自定义数据集 + +### 将数据集重新组织为已有格式 + +最简单的方法是将数据集转换为现有的数据集格式 (ImageNet)。 + +为了训练,根据图片的类别,存放至不同子目录下。训练数据文件夹结构如下所示: + +``` +imagenet +├── ... +├── train +│ ├── n01440764 +│ │ ├── n01440764_10026.JPEG +│ │ ├── n01440764_10027.JPEG +│ │ ├── ... +│ ├── ... +│ ├── n15075141 +│ │ ├── n15075141_999.JPEG +│ │ ├── n15075141_9993.JPEG +│ │ ├── ... 
+``` + +为了验证,我们提供了一个注释列表。列表的每一行都包含一个文件名及其相应的真实标签。格式如下: + +``` +ILSVRC2012_val_00000001.JPEG 65 +ILSVRC2012_val_00000002.JPEG 970 +ILSVRC2012_val_00000003.JPEG 230 +ILSVRC2012_val_00000004.JPEG 809 +ILSVRC2012_val_00000005.JPEG 516 +``` + +注:真实标签的值应该位于 `[0, 类别数目 - 1]` 之间 + +### 自定义数据集的示例 + +用户可以编写一个继承自 `BasesDataset` 的新数据集类,并重载 `load_annotations(self)` 方法,类似 [CIFAR10](https://github.com/open-mmlab/mmclassification/blob/master/mmcls/datasets/cifar.py) 和 [ImageNet](https://github.com/open-mmlab/mmclassification/blob/master/mmcls/datasets/imagenet.py)。 + + +通常,此方法返回一个包含所有样本的列表,其中的每个样本都是一个字典。字典中包含了必要的数据信息,例如 `img` 和 `gt_label`。 + +假设我们将要实现一个 `Filelist` 数据集,该数据集将使用文件列表进行训练和测试。注释列表的格式如下: + +``` +000001.jpg 0 +000002.jpg 1 +``` + +我们可以在 `mmcls/datasets/filelist.py` 中创建一个新的数据集类以加载数据。 + +```python +import mmcv +import numpy as np + +from .builder import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class Filelist(BaseDataset): + + def load_annotations(self): + assert isinstance(self.ann_file, str) + + data_infos = [] + with open(self.ann_file) as f: + samples = [x.strip().split(' ') for x in f.readlines()] + for filename, gt_label in samples: + info = {'img_prefix': self.data_prefix} + info['img_info'] = {'filename': filename} + info['gt_label'] = np.array(gt_label, dtype=np.int64) + data_infos.append(info) + return data_infos + +``` + +将新的数据集类加入到 `mmcls/datasets/__init__.py` 中: + +```python +from .base_dataset import BaseDataset +... +from .filelist import Filelist + +__all__ = [ + 'BaseDataset', ... ,'Filelist' +] +``` + +然后在配置文件中,为了使用 `Filelist`,用户可以按以下方式修改配置 + +```python +train = dict( + type='Filelist', + ann_file = 'image_list.txt', + pipeline=train_pipeline +) +``` + +## 通过混合数据集来自定义数据集 + +MMClassification 还支持混合数据集以进行训练。目前支持合并和重复数据集。 + +### 重复数据集 + +我们使用 `RepeatDataset` 作为一个重复数据集的封装。举个例子,假设原始数据集是 `Dataset_A`,为了重复它,我们需要如下的配置文件: + +```python +dataset_A_train = dict( + type='RepeatDataset', + times=N, + dataset=dict( # 这里是 Dataset_A 的原始配置 + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +### 类别平衡数据集 + +我们使用 `ClassBalancedDataset` 作为根据类别频率对数据集进行重复采样的封装类。进行重复采样的数据集需要实现函数 `self.get_cat_ids(idx)` 以支持 `ClassBalancedDataset`。 + +举个例子,按照 `oversample_thr=1e-3` 对 `Dataset_A` 进行重复采样,需要如下的配置文件: + +```python +dataset_A_train = dict( + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( # 这里是 Dataset_A 的原始配置 + type='Dataset_A', + ... + pipeline=train_pipeline + ) + ) +``` + +更加具体的细节,请参考 [源代码](https://github.com/open-mmlab/mmclassification/tree/master/mmcls/datasets/dataset_wrappers.py)。 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_modules.md b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_modules.md new file mode 100644 index 0000000000..f731b43c68 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/docs_zh-CN/tutorials/new_modules.md @@ -0,0 +1,281 @@ +# 教程 5:如何增加新模块 + +## 开发新组件 + +我们基本上将模型组件分为 3 种类型。 + +- 主干网络:通常是一个特征提取网络,例如 ResNet、MobileNet +- 颈部:用于连接主干网络和头部的组件,例如 GlobalAveragePooling +- 头部:用于执行特定任务的组件,例如分类和回归 + +### 添加新的主干网络 + +这里,我们以 ResNet_CIFAR 为例,展示了如何开发一个新的主干网络组件。 + +ResNet_CIFAR 针对 CIFAR 32x32 的图像输入,将 ResNet 中 `kernel_size=7, +stride=2` 的设置替换为 `kernel_size=3, stride=1`,并移除了 stem 层之后的 +`MaxPooling`,以避免传递过小的特征图到残差块中。 + +它继承自 `ResNet` 并只修改了 stem 层。 + +1. 
创建一个新文件 `mmcls/models/backbones/resnet_cifar.py`。 + +```python +import torch.nn as nn + +from ..builder import BACKBONES +from .resnet import ResNet + + +@BACKBONES.register_module() +class ResNet_CIFAR(ResNet): + + """ResNet backbone for CIFAR. + + (对这个主干网络的简短描述) + + Args: + depth(int): Network depth, from {18, 34, 50, 101, 152}. + ... + (参数文档) + """ + + def __init__(self, depth, deep_stem=False, **kwargs): + # 调用基类 ResNet 的初始化函数 + super(ResNet_CIFAR, self).__init__(depth, deep_stem=deep_stem **kwargs) + # 其他特殊的初始化流程 + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + # 重载基类的方法,以实现对网络结构的修改 + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): # 需要返回一个元组 + pass # 此处省略了网络的前向实现 + + def init_weights(self, pretrained=None): + pass # 如果有必要的话,重载基类 ResNet 的参数初始化函数 + + def train(self, mode=True): + pass # 如果有必要的话,重载基类 ResNet 的训练状态函数 +``` + +2. 在 `mmcls/models/backbones/__init__.py` 中导入新模块 + +```python +... +from .resnet_cifar import ResNet_CIFAR + +__all__ = [ + ..., 'ResNet_CIFAR' +] +``` + +3. 在配置文件中使用新的主干网络 + +```python +model = dict( + ... + backbone=dict( + type='ResNet_CIFAR', + depth=18, + other_arg=xxx), + ... +``` + +### 添加新的颈部组件 + +这里我们以 `GlobalAveragePooling` 为例。这是一个非常简单的颈部组件,没有任何参数。 + +要添加新的颈部组件,我们主要需要实现 `forward` 函数,该函数对主干网络的输出进行 +一些操作并将结果传递到头部。 + +1. 创建一个新文件 `mmcls/models/necks/gap.py` + + ```python + import torch.nn as nn + + from ..builder import NECKS + + @NECKS.register_module() + class GlobalAveragePooling(nn.Module): + + def __init__(self): + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, inputs): + # 简单起见,我们默认输入是一个张量 + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + return outs + ``` + +2. 在 `mmcls/models/necks/__init__.py` 中导入新模块 + + ```python + ... + from .gap import GlobalAveragePooling + + __all__ = [ + ..., 'GlobalAveragePooling' + ] + ``` + +3. 修改配置文件以使用新的颈部组件 + + ```python + model = dict( + neck=dict(type='GlobalAveragePooling'), + ) + ``` + +### 添加新的头部组件 + +在此,我们以 `LinearClsHead` 为例,说明如何开发新的头部组件。 + +要添加一个新的头部组件,基本上我们需要实现 `forward_train` 函数,它接受来自颈部 +或主干网络的特征图作为输入,并基于真实标签计算。 + +1. 创建一个文件 `mmcls/models/heads/linear_head.py`. + + ```python + from ..builder import HEADS + from .cls_head import ClsHead + + + @HEADS.register_module() + class LinearClsHead(ClsHead): + + def __init__(self, + num_classes, + in_channels, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, )): + super(LinearClsHead, self).__init__(loss=loss, topk=topk) + self.in_channels = in_channels + self.num_classes = num_classes + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self._init_layers() + + def _init_layers(self): + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def init_weights(self): + normal_init(self.fc, mean=0, std=0.01, bias=0) + + def forward_train(self, x, gt_label): + cls_score = self.fc(x) + losses = self.loss(cls_score, gt_label) + return losses + + ``` + +2. 在 `mmcls/models/heads/__init__.py` 中导入这个模块 + + ```python + ... + from .linear_head import LinearClsHead + + __all__ = [ + ..., 'LinearClsHead' + ] + ``` + +3. 
修改配置文件以使用新的头部组件。 + +连同 `GlobalAveragePooling` 颈部组件,完整的模型配置如下: + +```python +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +``` + +### 添加新的损失函数 + +要添加新的损失函数,我们主要需要在损失函数模块中 `forward` 函数。另外,利用装饰器 `weighted_loss` 可以方便的实现对每个元素的损失进行加权平均。 + +假设我们要模拟从另一个分类模型生成的概率分布,需要添加 `L1loss` 来实现该目的。 + +1. 创建一个新文件 `mmcls/models/losses/l1_loss.py` + + ```python + import torch + import torch.nn as nn + + from ..builder import LOSSES + from .utils import weighted_loss + + @weighted_loss + def l1_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + @LOSSES.register_module() + class L1Loss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(L1Loss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * l1_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss + ``` + +2. 在文件 `mmcls/models/losses/__init__.py` 中导入这个模块 + + ```python + ... + from .l1_loss import L1Loss, l1_loss + + __all__ = [ + ..., 'L1Loss', 'l1_loss' + ] + ``` + +3. 修改配置文件中的 `loss` 字段以使用新的损失函数 + + ```python + loss=dict(type='L1Loss', loss_weight=1.0)) + ``` diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/__init__.py new file mode 100644 index 0000000000..ae9a519b1a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/__init__.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv +from packaging.version import parse + +from .version import __version__ + + +def digit_version(version_str: str, length: int = 4): + """Convert a version string into a tuple of integers. + + This method is usually used for comparing two versions. For pre-release + versions: alpha < beta < rc. + + Args: + version_str (str): The version string. + length (int): The maximum number of version levels. Default: 4. + + Returns: + tuple[int]: The version info in digits (integers). 
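+
+    Examples (illustrative; outputs follow the rules implemented below):
+        >>> digit_version('1.3.8')
+        (1, 3, 8, 0, 0, 0)
+        >>> digit_version('1.0rc1')
+        (1, 0, 0, 0, -1, 1)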
+ """ + version = parse(version_str) + assert version.release, f'failed to parse version {version_str}' + release = list(version.release) + release = release[:length] + if len(release) < length: + release = release + [0] * (length - len(release)) + if version.is_prerelease: + mapping = {'a': -3, 'b': -2, 'rc': -1} + val = -4 + # version.pre can be None + if version.pre: + if version.pre[0] not in mapping: + warnings.warn(f'unknown prerelease version {version.pre[0]}, ' + 'version checking may go wrong') + else: + val = mapping[version.pre[0]] + release.extend([val, version.pre[-1]]) + else: + release.extend([val, 0]) + + elif version.is_postrelease: + release.extend([1, version.post]) + else: + release.extend([0, 0]) + return tuple(release) + + +mmcv_minimum_version = '1.3.8' +mmcv_maximum_version = '1.7.2' +mmcv_version = digit_version(mmcv.__version__) + + +assert (mmcv_version >= digit_version(mmcv_minimum_version) + and mmcv_version <= digit_version(mmcv_maximum_version)), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' + +__all__ = ['__version__', 'digit_version'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/__init__.py new file mode 100644 index 0000000000..7dc58a9731 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .inference import inference_model, init_model, show_result_pyplot +from .test import multi_gpu_test, single_gpu_test +from .train import set_random_seed, train_model + +__all__ = [ + 'set_random_seed', 'train_model', 'init_model', 'inference_model', + 'multi_gpu_test', 'single_gpu_test', 'show_result_pyplot' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/inference.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/inference.py new file mode 100644 index 0000000000..8bff2017bc --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/inference.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import mmcv +import numpy as np +import torch +from mmcv.parallel import collate, scatter +from mmcv.runner import load_checkpoint + +from mmcls.datasets.pipelines import Compose +from mmcls.models import build_classifier + + +def init_model(config, checkpoint=None, device='cuda:0', options=None): + """Initialize a classifier from config file. + + Args: + config (str or :obj:`mmcv.Config`): Config file path or the config + object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. + options (dict): Options to override some settings in the used config. + + Returns: + nn.Module: The constructed classifier. 
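+
+    Example (illustrative; the config path is a placeholder for any config
+    shipped with this repository):
+        >>> from mmcls.apis import init_model
+        >>> model = init_model('configs/resnet/resnet50_b32x8_imagenet.py',
+        ...                    checkpoint=None, device='cpu')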
+ """ + if isinstance(config, str): + config = mmcv.Config.fromfile(config) + elif not isinstance(config, mmcv.Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if options is not None: + config.merge_from_dict(options) + config.model.pretrained = None + model = build_classifier(config.model) + if checkpoint is not None: + map_loc = 'cpu' if device == 'cpu' else None + checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc) + if 'CLASSES' in checkpoint.get('meta', {}): + model.CLASSES = checkpoint['meta']['CLASSES'] + else: + from mmcls.datasets import ImageNet + warnings.simplefilter('once') + warnings.warn('Class names are not saved in the checkpoint\'s ' + 'meta data, use imagenet by default.') + model.CLASSES = ImageNet.CLASSES + model.cfg = config # save the config in the model for convenience + model.to(device) + model.eval() + return model + + +def inference_model(model, img): + """Inference image(s) with the classifier. + + Args: + model (nn.Module): The loaded classifier. + img (str/ndarray): The image filename or loaded image. + + Returns: + result (dict): The classification results that contains + `class_name`, `pred_label` and `pred_score`. + """ + cfg = model.cfg + device = next(model.parameters()).device # model device + # build the data pipeline + if isinstance(img, str): + if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile': + cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile')) + data = dict(img_info=dict(filename=img), img_prefix=None) + else: + if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile': + cfg.data.test.pipeline.pop(0) + data = dict(img=img) + test_pipeline = Compose(cfg.data.test.pipeline) + data = test_pipeline(data) + data = collate([data], samples_per_gpu=1) + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + + # forward the model + with torch.no_grad(): + scores = model(return_loss=False, **data) + pred_score = np.max(scores, axis=1)[0] + pred_label = np.argmax(scores, axis=1)[0] + result = {'pred_label': pred_label, 'pred_score': float(pred_score)} + result['pred_class'] = model.CLASSES[result['pred_label']] + return result + + +def show_result_pyplot(model, + img, + result, + fig_size=(15, 10), + title='result', + wait_time=0): + """Visualize the classification results on the image. + + Args: + model (nn.Module): The loaded classifier. + img (str or np.ndarray): Image filename or loaded image. + result (list): The classification result. + fig_size (tuple): Figure size of the pyplot figure. + Defaults to (15, 10). + title (str): Title of the pyplot figure. + Defaults to 'result'. + wait_time (int): How many seconds to display the image. + Defaults to 0. + """ + if hasattr(model, 'module'): + model = model.module + model.show_result( + img, + result, + show=True, + fig_size=fig_size, + win_name=title, + wait_time=wait_time) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/test.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/test.py new file mode 100644 index 0000000000..153bf38447 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/test.py @@ -0,0 +1,198 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
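+# Evaluation helpers: ``single_gpu_test`` runs inference on a single device,
+# while ``multi_gpu_test`` lets every rank process its own share of the
+# dataloader and then gathers the results (GPU gather or a temporary dir).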
+import os.path as osp +import pickle +import shutil +import tempfile +import time + +import mmcv +import numpy as np +import torch +import torch.distributed as dist +from mmcv.image import tensor2imgs +from mmcv.runner import get_dist_info + + +def single_gpu_test(model, + data_loader, + show=False, + out_dir=None, + **show_kwargs): + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, **data) + + batch_size = len(result) + results.extend(result) + + if show or out_dir: + scores = np.vstack(result) + pred_score = np.max(scores, axis=1) + pred_label = np.argmax(scores, axis=1) + pred_class = [model.CLASSES[lb] for lb in pred_label] + + img_metas = data['img_metas'].data[0] + imgs = tensor2imgs(data['img'], **img_metas[0]['img_norm_cfg']) + assert len(imgs) == len(img_metas) + + for i, (img, img_meta) in enumerate(zip(imgs, img_metas)): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + + ori_h, ori_w = img_meta['ori_shape'][:-1] + img_show = mmcv.imresize(img_show, (ori_w, ori_h)) + + if out_dir: + out_file = osp.join(out_dir, img_meta['ori_filename']) + else: + out_file = None + + result_show = { + 'pred_score': pred_score[i], + 'pred_label': pred_label[i], + 'pred_class': pred_class[i] + } + model.module.show_result( + img_show, + result_show, + show=show, + out_file=out_file, + **show_kwargs) + + batch_size = data['img'].size(0) + for _ in range(batch_size): + prog_bar.update() + return results + + +def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + """Test model with multiple gpus. + + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. + gpu_collect (bool): Option to use either gpu or cpu to collect results. + + Returns: + list: The prediction results. + """ + model.eval() + results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + # Check if tmpdir is valid for cpu_collect + if (not gpu_collect) and (tmpdir is not None and osp.exists(tmpdir)): + raise OSError((f'The tmpdir {tmpdir} already exists.', + ' Since tmpdir will be deleted after testing,', + ' please make sure you specify an empty one.')) + prog_bar = mmcv.ProgressBar(len(dataset)) + time.sleep(2) # This line can prevent deadlock problem in some cases. 
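+    # Each rank runs inference on its own portion of the data; the partial
+    # results are merged after the loop by collect_results_gpu/_cpu.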
+ for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, **data) + if isinstance(result, list): + results.extend(result) + else: + results.append(result) + + if rank == 0: + batch_size = data['img'].size(0) + for _ in range(batch_size * world_size): + prog_bar.update() + + # collect results from all ranks + if gpu_collect: + results = collect_results_gpu(results, len(dataset)) + else: + results = collect_results_cpu(results, len(dataset), tmpdir) + return results + + +def collect_results_cpu(result_part, size, tmpdir=None): + rank, world_size = get_dist_info() + # create a tmp dir if it is not specified + if tmpdir is None: + MAX_LEN = 512 + # 32 is whitespace + dir_tensor = torch.full((MAX_LEN, ), + 32, + dtype=torch.uint8, + device='cuda') + if rank == 0: + mmcv.mkdir_or_exist('.dist_test') + tmpdir = tempfile.mkdtemp(dir='.dist_test') + tmpdir = torch.tensor( + bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') + dir_tensor[:len(tmpdir)] = tmpdir + dist.broadcast(dir_tensor, 0) + tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() + else: + mmcv.mkdir_or_exist(tmpdir) + # dump the part result to the dir + mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) + dist.barrier() + # collect all parts + if rank != 0: + return None + else: + # load results of all parts from tmp dir + part_list = [] + for i in range(world_size): + part_file = osp.join(tmpdir, f'part_{i}.pkl') + part_result = mmcv.load(part_file) + part_list.append(part_result) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + # remove tmp dir + shutil.rmtree(tmpdir) + return ordered_results + + +def collect_results_gpu(result_part, size): + rank, world_size = get_dist_info() + # dump result part to tensor with pickle + part_tensor = torch.tensor( + bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') + # gather all result part tensor shape + shape_tensor = torch.tensor(part_tensor.shape, device='cuda') + shape_list = [shape_tensor.clone() for _ in range(world_size)] + dist.all_gather(shape_list, shape_tensor) + # padding result part tensor to max length + shape_max = torch.tensor(shape_list).max() + part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') + part_send[:shape_tensor[0]] = part_tensor + part_recv_list = [ + part_tensor.new_zeros(shape_max) for _ in range(world_size) + ] + # gather all result part + dist.all_gather(part_recv_list, part_send) + + if rank == 0: + part_list = [] + for recv, shape in zip(part_recv_list, shape_list): + part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()) + part_list.append(part_result) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + return ordered_results diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/train.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/train.py new file mode 100644 index 0000000000..e5bea85f77 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/apis/train.py @@ -0,0 +1,177 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
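+# Training helpers: ``set_random_seed`` and ``train_model``, which builds the
+# dataloaders, wraps the model for the selected device (cuda / npu / cpu) and
+# drives an mmcv runner with optimizer, lr, checkpoint, eval and fp16 hooks.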
+import random +import warnings + +import numpy as np +import torch +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import DistSamplerSeedHook, build_optimizer, build_runner + +from mmcls.core import DistOptimizerHook +from mmcls.datasets import build_dataloader, build_dataset +from mmcls.utils import get_root_logger + +# TODO import eval hooks from mmcv and delete them from mmcls +try: + from mmcv.runner.hooks import EvalHook, DistEvalHook +except ImportError: + warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from mmcls ' + 'will be deprecated.' + 'Please install mmcv through master branch.') + from mmcls.core import EvalHook, DistEvalHook + +# TODO import optimizer hook from mmcv and delete them from mmcls +try: + from mmcv.runner import Fp16OptimizerHook +except ImportError: + warnings.warn('DeprecationWarning: FP16OptimizerHook from mmcls will be ' + 'deprecated. Please install mmcv>=1.1.4.') + from mmcls.core import Fp16OptimizerHook + + +def set_random_seed(seed, deterministic=False): + """Set random seed. + + Args: + seed (int): Seed to be used. + deterministic (bool): Whether to set the deterministic option for + CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` + to True and `torch.backends.cudnn.benchmark` to False. + Default: False. + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + if deterministic: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def train_model(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + device='cuda', + meta=None): + logger = get_root_logger() + + # prepare data loaders + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + + data_loaders = [ + build_dataloader( + ds, + cfg.data.samples_per_gpu, + cfg.data.workers_per_gpu, + # cfg.gpus will be ignored if distributed + num_gpus=len(cfg.gpu_ids), + dist=distributed, + round_up=True, + seed=cfg.seed) for ds in dataset + ] + + # put model on gpus + if distributed: + find_unused_parameters = cfg.get('find_unused_parameters', False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + if device == 'cuda': + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + elif device == 'npu': + from mmcv.device.npu import NPUDistributedDataParallel + model = NPUDistributedDataParallel( + model.npu(), + device_ids=[torch.npu.current_device()], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + else: + if device == 'cuda': + model = MMDataParallel( + model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) + elif device == 'npu': + from mmcv.device.npu import NPUDataParallel + model = NPUDataParallel( + model.npu(), + device_ids=cfg.gpu_ids) + elif device == 'cpu': + model = model.cpu() + else: + raise ValueError(F'unsupported device name {device}.') + + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + + if cfg.get('runner') is None: + cfg.runner = { + 'type': 'EpochBasedRunner', + 'max_epochs': cfg.total_epochs + } + warnings.warn( + 'config is now expected to have a `runner` section, ' + 'please set `runner` in your config.', UserWarning) + + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=cfg.work_dir, + 
logger=logger, + meta=meta)) + + # an ugly walkaround to make the .log and .log.json filenames the same + runner.timestamp = timestamp + + # fp16 setting + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + optimizer_config = Fp16OptimizerHook( + **cfg.optimizer_config, **fp16_cfg, distributed=distributed) + elif distributed and 'type' not in cfg.optimizer_config: + optimizer_config = DistOptimizerHook(**cfg.optimizer_config) + else: + optimizer_config = cfg.optimizer_config + + # register hooks + runner.register_training_hooks( + cfg.lr_config, + optimizer_config, + cfg.checkpoint_config, + cfg.log_config, + cfg.get('momentum_config', None), + custom_hooks_config=cfg.get('custom_hooks', None)) + if distributed and cfg.runner['type'] == 'EpochBasedRunner': + runner.register_hook(DistSamplerSeedHook()) + + # register eval hooks + if validate: + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + val_dataloader = build_dataloader( + val_dataset, + samples_per_gpu=cfg.data.samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False, + round_up=True) + eval_cfg = cfg.get('evaluation', {}) + eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' + eval_hook = DistEvalHook if distributed else EvalHook + # `EvalHook` needs to be executed after `IterTimerHook`. + # Otherwise, it will cause a bug if use `IterBasedRunner`. + # Refers to https://github.com/open-mmlab/mmcv/issues/1261 + runner.register_hook( + eval_hook(val_dataloader, **eval_cfg), priority='LOW') + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/__init__.py new file mode 100644 index 0000000000..3a1df4d03f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .evaluation import * # noqa: F401, F403 +from .fp16 import * # noqa: F401, F403 +from .utils import * # noqa: F401, F403 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/__init__.py new file mode 100644 index 0000000000..48a941923a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .eval_hooks import DistEvalHook, EvalHook +from .eval_metrics import (calculate_confusion_matrix, f1_score, precision, + precision_recall_f1, recall, support) +from .mean_ap import average_precision, mAP +from .multilabel_eval_metrics import average_performance + +__all__ = [ + 'DistEvalHook', 'EvalHook', 'precision', 'recall', 'f1_score', 'support', + 'average_precision', 'mAP', 'average_performance', + 'calculate_confusion_matrix', 'precision_recall_f1' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_hooks.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_hooks.py new file mode 100644 index 0000000000..25eeba9fc8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_hooks.py @@ -0,0 +1,107 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
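+# Deprecated local copies of EvalHook / DistEvalHook, kept only as a fallback
+# for older mmcv versions (see the deprecation warnings below).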
+import os.path as osp +import warnings + +from mmcv.runner import Hook +from torch.utils.data import DataLoader + + +class EvalHook(Hook): + """Evaluation hook. + + Args: + dataloader (DataLoader): A PyTorch dataloader. + interval (int): Evaluation interval (by epochs). Default: 1. + """ + + def __init__(self, dataloader, interval=1, by_epoch=True, **eval_kwargs): + warnings.warn( + 'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be ' + 'deprecated, please install mmcv through master branch.') + if not isinstance(dataloader, DataLoader): + raise TypeError('dataloader must be a pytorch DataLoader, but got' + f' {type(dataloader)}') + self.dataloader = dataloader + self.interval = interval + self.eval_kwargs = eval_kwargs + self.by_epoch = by_epoch + + def after_train_epoch(self, runner): + if not self.by_epoch or not self.every_n_epochs(runner, self.interval): + return + from mmcls.apis import single_gpu_test + results = single_gpu_test(runner.model, self.dataloader, show=False) + self.evaluate(runner, results) + + def after_train_iter(self, runner): + if self.by_epoch or not self.every_n_iters(runner, self.interval): + return + from mmcls.apis import single_gpu_test + runner.log_buffer.clear() + results = single_gpu_test(runner.model, self.dataloader, show=False) + self.evaluate(runner, results) + + def evaluate(self, runner, results): + eval_res = self.dataloader.dataset.evaluate( + results, logger=runner.logger, **self.eval_kwargs) + for name, val in eval_res.items(): + runner.log_buffer.output[name] = val + runner.log_buffer.ready = True + + +class DistEvalHook(EvalHook): + """Distributed evaluation hook. + + Args: + dataloader (DataLoader): A PyTorch dataloader. + interval (int): Evaluation interval (by epochs). Default: 1. + tmpdir (str, optional): Temporary directory to save the results of all + processes. Default: None. + gpu_collect (bool): Whether to use gpu or cpu to collect results. + Default: False. 
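+
+    Example (illustrative; ``val_loader`` is assumed to be a
+    ``torch.utils.data.DataLoader``):
+        >>> hook = DistEvalHook(val_loader, interval=1, gpu_collect=True)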
+ """ + + def __init__(self, + dataloader, + interval=1, + gpu_collect=False, + by_epoch=True, + **eval_kwargs): + warnings.warn( + 'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be ' + 'deprecated, please install mmcv through master branch.') + if not isinstance(dataloader, DataLoader): + raise TypeError('dataloader must be a pytorch DataLoader, but got ' + f'{type(dataloader)}') + self.dataloader = dataloader + self.interval = interval + self.gpu_collect = gpu_collect + self.by_epoch = by_epoch + self.eval_kwargs = eval_kwargs + + def after_train_epoch(self, runner): + if not self.by_epoch or not self.every_n_epochs(runner, self.interval): + return + from mmcls.apis import multi_gpu_test + results = multi_gpu_test( + runner.model, + self.dataloader, + tmpdir=osp.join(runner.work_dir, '.eval_hook'), + gpu_collect=self.gpu_collect) + if runner.rank == 0: + print('\n') + self.evaluate(runner, results) + + def after_train_iter(self, runner): + if self.by_epoch or not self.every_n_iters(runner, self.interval): + return + from mmcls.apis import multi_gpu_test + runner.log_buffer.clear() + results = multi_gpu_test( + runner.model, + self.dataloader, + tmpdir=osp.join(runner.work_dir, '.eval_hook'), + gpu_collect=self.gpu_collect) + if runner.rank == 0: + print('\n') + self.evaluate(runner, results) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_metrics.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_metrics.py new file mode 100644 index 0000000000..bc90fa8534 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/eval_metrics.py @@ -0,0 +1,248 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from numbers import Number + +import numpy as np +import torch + + +def calculate_confusion_matrix(pred, target): + """Calculate confusion matrix according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + + Returns: + torch.Tensor: Confusion matrix + The shape is (C, C), where C is the number of classes. + """ + + if isinstance(pred, np.ndarray): + pred = torch.from_numpy(pred) + if isinstance(target, np.ndarray): + target = torch.from_numpy(target) + assert ( + isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor)), \ + (f'pred and target should be torch.Tensor or np.ndarray, ' + f'but got {type(pred)} and {type(target)}.') + + num_classes = pred.size(1) + _, pred_label = pred.topk(1, dim=1) + pred_label = pred_label.view(-1) + target_label = target.view(-1) + assert len(pred_label) == len(target_label) + confusion_matrix = torch.zeros(num_classes, num_classes) + with torch.no_grad(): + for t, p in zip(target_label, pred_label): + confusion_matrix[t.long(), p.long()] += 1 + return confusion_matrix + + +def precision_recall_f1(pred, target, average_mode='macro', thrs=0.): + """Calculate precision, recall and f1 score according to the prediction and + target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted mean. 
+ Defaults to 'macro'. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. + + Returns: + tuple: tuple containing precision, recall, f1 score. + + The type of precision, recall, f1 score is one of the following: + + +----------------------------+--------------------+-------------------+ + | Args | ``thrs`` is number | ``thrs`` is tuple | + +============================+====================+===================+ + | ``average_mode`` = "macro" | float | list[float] | + +----------------------------+--------------------+-------------------+ + | ``average_mode`` = "none" | np.array | list[np.array] | + +----------------------------+--------------------+-------------------+ + """ + + allowed_average_mode = ['macro', 'none'] + if average_mode not in allowed_average_mode: + raise ValueError(f'Unsupport type of averaging {average_mode}.') + + if isinstance(pred, torch.Tensor): + pred = pred.numpy() + if isinstance(target, torch.Tensor): + target = target.numpy() + assert (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)),\ + (f'pred and target should be torch.Tensor or np.ndarray, ' + f'but got {type(pred)} and {type(target)}.') + + if isinstance(thrs, Number): + thrs = (thrs, ) + return_single = True + elif isinstance(thrs, tuple): + return_single = False + else: + raise TypeError( + f'thrs should be a number or tuple, but got {type(thrs)}.') + + label = np.indices(pred.shape)[1] + pred_label = np.argsort(pred, axis=1)[:, -1] + pred_score = np.sort(pred, axis=1)[:, -1] + + precisions = [] + recalls = [] + f1_scores = [] + for thr in thrs: + # Only prediction values larger than thr are counted as positive + _pred_label = pred_label.copy() + if thr is not None: + _pred_label[pred_score <= thr] = -1 + pred_positive = label == _pred_label.reshape(-1, 1) + gt_positive = label == target.reshape(-1, 1) + precision = (pred_positive & gt_positive).sum(0) / np.maximum( + pred_positive.sum(0), 1) * 100 + recall = (pred_positive & gt_positive).sum(0) / np.maximum( + gt_positive.sum(0), 1) * 100 + f1_score = 2 * precision * recall / np.maximum(precision + recall, + 1e-20) + if average_mode == 'macro': + precision = float(precision.mean()) + recall = float(recall.mean()) + f1_score = float(f1_score.mean()) + precisions.append(precision) + recalls.append(recall) + f1_scores.append(f1_score) + + if return_single: + return precisions[0], recalls[0], f1_scores[0] + else: + return precisions, recalls, f1_scores + + +def precision(pred, target, average_mode='macro', thrs=0.): + """Calculate precision according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted mean. + Defaults to 'macro'. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. + + Returns: + float | np.array | list[float | np.array]: Precision. 
+ + +----------------------------+--------------------+-------------------+ + | Args | ``thrs`` is number | ``thrs`` is tuple | + +============================+====================+===================+ + | ``average_mode`` = "macro" | float | list[float] | + +----------------------------+--------------------+-------------------+ + | ``average_mode`` = "none" | np.array | list[np.array] | + +----------------------------+--------------------+-------------------+ + """ + precisions, _, _ = precision_recall_f1(pred, target, average_mode, thrs) + return precisions + + +def recall(pred, target, average_mode='macro', thrs=0.): + """Calculate recall according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted mean. + Defaults to 'macro'. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. + + Returns: + float | np.array | list[float | np.array]: Recall. + + +----------------------------+--------------------+-------------------+ + | Args | ``thrs`` is number | ``thrs`` is tuple | + +============================+====================+===================+ + | ``average_mode`` = "macro" | float | list[float] | + +----------------------------+--------------------+-------------------+ + | ``average_mode`` = "none" | np.array | list[np.array] | + +----------------------------+--------------------+-------------------+ + """ + _, recalls, _ = precision_recall_f1(pred, target, average_mode, thrs) + return recalls + + +def f1_score(pred, target, average_mode='macro', thrs=0.): + """Calculate F1 score according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). + average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted mean. + Defaults to 'macro'. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. + + Returns: + float | np.array | list[float | np.array]: F1 score. + + +----------------------------+--------------------+-------------------+ + | Args | ``thrs`` is number | ``thrs`` is tuple | + +============================+====================+===================+ + | ``average_mode`` = "macro" | float | list[float] | + +----------------------------+--------------------+-------------------+ + | ``average_mode`` = "none" | np.array | list[np.array] | + +----------------------------+--------------------+-------------------+ + """ + _, _, f1_scores = precision_recall_f1(pred, target, average_mode, thrs) + return f1_scores + + +def support(pred, target, average_mode='macro'): + """Calculate the total number of occurrences of each label according to the + prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction with shape (N, C). + target (torch.Tensor | np.array): The target of each prediction with + shape (N, 1) or (N,). 
+ average_mode (str): The type of averaging performed on the result. + Options are 'macro' and 'none'. If 'none', the scores for each + class are returned. If 'macro', calculate metrics for each class, + and find their unweighted sum. + Defaults to 'macro'. + + Returns: + float | np.array: Support. + + - If the ``average_mode`` is set to macro, the function returns + a single float. + - If the ``average_mode`` is set to none, the function returns + a np.array with shape C. + """ + confusion_matrix = calculate_confusion_matrix(pred, target) + with torch.no_grad(): + res = confusion_matrix.sum(1) + if average_mode == 'macro': + res = float(res.sum().numpy()) + elif average_mode == 'none': + res = res.numpy() + else: + raise ValueError(f'Unsupport type of averaging {average_mode}.') + return res diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/mean_ap.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/mean_ap.py new file mode 100644 index 0000000000..2771a2acd7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/mean_ap.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + + +def average_precision(pred, target): + r"""Calculate the average precision for a single class. + + AP summarizes a precision-recall curve as the weighted mean of maximum + precisions obtained for any r'>r, where r is the recall: + + .. math:: + \text{AP} = \sum_n (R_n - R_{n-1}) P_n + + Note that no approximation is involved since the curve is piecewise + constant. + + Args: + pred (np.ndarray): The model prediction with shape (N, ). + target (np.ndarray): The target of each prediction with shape (N, ). + + Returns: + float: a single float as average precision value. + """ + eps = np.finfo(np.float32).eps + + # sort examples + sort_inds = np.argsort(-pred) + sort_target = target[sort_inds] + + # count true positive examples + pos_inds = sort_target == 1 + tp = np.cumsum(pos_inds) + total_pos = tp[-1] + + # count not difficult examples + pn_inds = sort_target != -1 + pn = np.cumsum(pn_inds) + + tp[np.logical_not(pos_inds)] = 0 + precision = tp / np.maximum(pn, eps) + ap = np.sum(precision) / np.maximum(total_pos, eps) + return ap + + +def mAP(pred, target): + """Calculate the mean average precision with respect of classes. + + Args: + pred (torch.Tensor | np.ndarray): The model prediction with shape + (N, C), where C is the number of classes. + target (torch.Tensor | np.ndarray): The target of each prediction with + shape (N, C), where C is the number of classes. 1 stands for + positive examples, 0 stands for negative examples and -1 stands for + difficult examples. + + Returns: + float: A single float as mAP value. + """ + if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor): + pred = pred.detach().cpu().numpy() + target = target.detach().cpu().numpy() + elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)): + raise TypeError('pred and target should both be torch.Tensor or' + 'np.ndarray') + + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' 
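+    # Average precision is computed per class (one column of ``pred`` and
+    # ``target``) and the mean is reported as a percentage.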
+ num_classes = pred.shape[1] + ap = np.zeros(num_classes) + for k in range(num_classes): + ap[k] = average_precision(pred[:, k], target[:, k]) + mean_ap = ap.mean() * 100.0 + return mean_ap diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/multilabel_eval_metrics.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/multilabel_eval_metrics.py new file mode 100644 index 0000000000..1d34e2b081 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/evaluation/multilabel_eval_metrics.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numpy as np +import torch + + +def average_performance(pred, target, thr=None, k=None): + """Calculate CP, CR, CF1, OP, OR, OF1, where C stands for per-class + average, O stands for overall average, P stands for precision, R stands for + recall and F1 stands for F1-score. + + Args: + pred (torch.Tensor | np.ndarray): The model prediction with shape + (N, C), where C is the number of classes. + target (torch.Tensor | np.ndarray): The target of each prediction with + shape (N, C), where C is the number of classes. 1 stands for + positive examples, 0 stands for negative examples and -1 stands for + difficult examples. + thr (float): The confidence threshold. Defaults to None. + k (int): Top-k performance. Note that if thr and k are both given, k + will be ignored. Defaults to None. + + Returns: + tuple: (CP, CR, CF1, OP, OR, OF1) + """ + if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor): + pred = pred.detach().cpu().numpy() + target = target.detach().cpu().numpy() + elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)): + raise TypeError('pred and target should both be torch.Tensor or' + 'np.ndarray') + if thr is None and k is None: + thr = 0.5 + warnings.warn('Neither thr nor k is given, set thr as 0.5 by ' + 'default.') + elif thr is not None and k is not None: + warnings.warn('Both thr and k are given, use threshold in favor of ' + 'top-k.') + + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' 
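+    # Difficult examples (label -1) are treated as negatives below; a label is
+    # predicted positive either by the confidence threshold or by top-k rank.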
+ + eps = np.finfo(np.float32).eps + target[target == -1] = 0 + if thr is not None: + # a label is predicted positive if the confidence is no lower than thr + pos_inds = pred >= thr + + else: + # top-k labels will be predicted positive for any example + sort_inds = np.argsort(-pred, axis=1) + sort_inds_ = sort_inds[:, :k] + inds = np.indices(sort_inds_.shape) + pos_inds = np.zeros_like(pred) + pos_inds[inds[0], sort_inds_] = 1 + + tp = (pos_inds * target) == 1 + fp = (pos_inds * (1 - target)) == 1 + fn = ((1 - pos_inds) * target) == 1 + + precision_class = tp.sum(axis=0) / np.maximum( + tp.sum(axis=0) + fp.sum(axis=0), eps) + recall_class = tp.sum(axis=0) / np.maximum( + tp.sum(axis=0) + fn.sum(axis=0), eps) + CP = precision_class.mean() * 100.0 + CR = recall_class.mean() * 100.0 + CF1 = 2 * CP * CR / np.maximum(CP + CR, eps) + OP = tp.sum() / np.maximum(tp.sum() + fp.sum(), eps) * 100.0 + OR = tp.sum() / np.maximum(tp.sum() + fn.sum(), eps) * 100.0 + OF1 = 2 * OP * OR / np.maximum(OP + OR, eps) + return CP, CR, CF1, OP, OR, OF1 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/__init__.py new file mode 100644 index 0000000000..1c6ec1b9bc --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .test import ONNXRuntimeClassifier, TensorRTClassifier + +__all__ = ['ONNXRuntimeClassifier', 'TensorRTClassifier'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/test.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/test.py new file mode 100644 index 0000000000..f7caed6e02 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/export/test.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
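+# Wrappers that expose exported ONNX / TensorRT models through the
+# ``BaseClassifier`` test interface, so they can be evaluated with the same
+# pipeline as regular PyTorch classifiers.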
+import warnings + +import numpy as np +import onnxruntime as ort +import torch + +from mmcls.models.classifiers import BaseClassifier + + +class ONNXRuntimeClassifier(BaseClassifier): + """Wrapper for classifier's inference with ONNXRuntime.""" + + def __init__(self, onnx_file, class_names, device_id): + super(ONNXRuntimeClassifier, self).__init__() + sess = ort.InferenceSession(onnx_file) + + providers = ['CPUExecutionProvider'] + options = [{}] + is_cuda_available = ort.get_device() == 'GPU' + if is_cuda_available: + providers.insert(0, 'CUDAExecutionProvider') + options.insert(0, {'device_id': device_id}) + sess.set_providers(providers, options) + + self.sess = sess + self.CLASSES = class_names + self.device_id = device_id + self.io_binding = sess.io_binding() + self.output_names = [_.name for _ in sess.get_outputs()] + self.is_cuda_available = is_cuda_available + + def simple_test(self, img, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def extract_feat(self, imgs): + raise NotImplementedError('This method is not implemented.') + + def forward_train(self, imgs, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def forward_test(self, imgs, img_metas, **kwargs): + input_data = imgs + # set io binding for inputs/outputs + device_type = 'cuda' if self.is_cuda_available else 'cpu' + if not self.is_cuda_available: + input_data = input_data.cpu() + self.io_binding.bind_input( + name='input', + device_type=device_type, + device_id=self.device_id, + element_type=np.float32, + shape=input_data.shape, + buffer_ptr=input_data.data_ptr()) + + for name in self.output_names: + self.io_binding.bind_output(name) + # run session to get outputs + self.sess.run_with_iobinding(self.io_binding) + results = self.io_binding.copy_outputs_to_cpu()[0] + return list(results) + + +class TensorRTClassifier(BaseClassifier): + + def __init__(self, trt_file, class_names, device_id): + super(TensorRTClassifier, self).__init__() + from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin + try: + load_tensorrt_plugin() + except (ImportError, ModuleNotFoundError): + warnings.warn('If input model has custom op from mmcv, \ + you may have to build mmcv with TensorRT from source.') + model = TRTWraper( + trt_file, input_names=['input'], output_names=['probs']) + + self.model = model + self.device_id = device_id + self.CLASSES = class_names + + def simple_test(self, img, img_metas, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def extract_feat(self, imgs): + raise NotImplementedError('This method is not implemented.') + + def forward_train(self, imgs, **kwargs): + raise NotImplementedError('This method is not implemented.') + + def forward_test(self, imgs, img_metas, **kwargs): + input_data = imgs + with torch.cuda.device(self.device_id), torch.no_grad(): + results = self.model({'input': input_data})['probs'] + results = results.detach().cpu().numpy() + + return list(results) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/__init__.py new file mode 100644 index 0000000000..20069a93ef --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .decorators import auto_fp16, force_fp32 +from .hooks import Fp16OptimizerHook, wrap_fp16_model + +__all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/decorators.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/decorators.py new file mode 100644 index 0000000000..b9b11b3c16 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/decorators.py @@ -0,0 +1,161 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools +from inspect import getfullargspec + +import torch + +from .utils import cast_tensor_type + + +def auto_fp16(apply_to=None, out_fp32=False): + """Decorator to enable fp16 training automatically. + + This decorator is useful when you write custom modules and want to support + mixed precision training. If inputs arguments are fp32 tensors, they will + be converted to fp16 automatically. Arguments other than fp32 tensors are + ignored. + + Args: + apply_to (Iterable, optional): The argument names to be converted. + `None` indicates all arguments. + out_fp32 (bool): Whether to convert the output back to fp32. + + :Example: + + class MyModule1(nn.Module) + + # Convert x and y to fp16 + @auto_fp16() + def forward(self, x, y): + pass + + class MyModule2(nn.Module): + + # convert pred to fp16 + @auto_fp16(apply_to=('pred', )) + def do_something(self, pred, others): + pass + """ + + def auto_fp16_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs): + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. + if not isinstance(args[0], torch.nn.Module): + raise TypeError('@auto_fp16 can only be used to decorate the ' + 'method of nn.Module') + if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): + return old_func(*args, **kwargs) + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be casted + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + # NOTE: default args are not taken into consideration + if args: + arg_names = args_info.args[:len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append( + cast_tensor_type(args[i], torch.float, torch.half)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = {} + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type( + arg_value, torch.float, torch.half) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + output = old_func(*new_args, **new_kwargs) + # cast the results back to fp32 if necessary + if out_fp32: + output = cast_tensor_type(output, torch.half, torch.float) + return output + + return new_func + + return auto_fp16_wrapper + + +def force_fp32(apply_to=None, out_fp16=False): + """Decorator to convert input arguments to fp32 in force. + + This decorator is useful when you write custom modules and want to support + mixed precision training. If there are some inputs that must be processed + in fp32 mode, then this decorator can handle it. If inputs arguments are + fp16 tensors, they will be converted to fp32 automatically. Arguments other + than fp16 tensors are ignored. 
+ + Args: + apply_to (Iterable, optional): The argument names to be converted. + `None` indicates all arguments. + out_fp16 (bool): Whether to convert the output back to fp16. + + :Example: + + class MyModule1(nn.Module) + + # Convert x and y to fp32 + @force_fp32() + def loss(self, x, y): + pass + + class MyModule2(nn.Module): + + # convert pred to fp32 + @force_fp32(apply_to=('pred', )) + def post_process(self, pred, others): + pass + """ + + def force_fp32_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs): + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. + if not isinstance(args[0], torch.nn.Module): + raise TypeError('@force_fp32 can only be used to decorate the ' + 'method of nn.Module') + if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): + return old_func(*args, **kwargs) + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be casted + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + if args: + arg_names = args_info.args[:len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append( + cast_tensor_type(args[i], torch.half, torch.float)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = dict() + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type( + arg_value, torch.half, torch.float) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + output = old_func(*new_args, **new_kwargs) + # cast the results back to fp32 if necessary + if out_fp16: + output = cast_tensor_type(output, torch.float, torch.half) + return output + + return new_func + + return force_fp32_wrapper diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/hooks.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/hooks.py new file mode 100644 index 0000000000..642d22b774 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/hooks.py @@ -0,0 +1,129 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +from mmcv.runner import OptimizerHook +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..utils import allreduce_grads +from .utils import cast_tensor_type + + +class Fp16OptimizerHook(OptimizerHook): + """FP16 optimizer hook. + + The steps of fp16 optimizer is as follows. + 1. Scale the loss value. + 2. BP in the fp16 model. + 2. Copy gradients from fp16 model to fp32 weights. + 3. Update fp32 weights. + 4. Copy updated parameters from fp32 weights to fp16 model. + + Refer to https://arxiv.org/abs/1710.03740 for more details. + + Args: + loss_scale (float): Scale factor multiplied with loss. 
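+
+    Example (illustrative): adding ``fp16 = dict(loss_scale=512.)`` to a config
+    makes ``train_model`` wrap ``optimizer_config`` with this hook.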
+ """ + + def __init__(self, + grad_clip=None, + coalesce=True, + bucket_size_mb=-1, + loss_scale=512., + distributed=True): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + self.loss_scale = loss_scale + self.distributed = distributed + + def before_run(self, runner): + # keep a copy of fp32 weights + runner.optimizer.param_groups = copy.deepcopy( + runner.optimizer.param_groups) + # convert model to fp16 + wrap_fp16_model(runner.model) + + def copy_grads_to_fp32(self, fp16_net, fp32_weights): + """Copy gradients from fp16 model to fp32 weight copy.""" + for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()): + if fp16_param.grad is not None: + if fp32_param.grad is None: + fp32_param.grad = fp32_param.data.new(fp32_param.size()) + fp32_param.grad.copy_(fp16_param.grad) + + def copy_params_to_fp16(self, fp16_net, fp32_weights): + """Copy updated params from fp32 weight copy to fp16 model.""" + for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights): + fp16_param.data.copy_(fp32_param.data) + + def after_train_iter(self, runner): + # clear grads of last iteration + runner.model.zero_grad() + runner.optimizer.zero_grad() + # scale the loss value + scaled_loss = runner.outputs['loss'] * self.loss_scale + scaled_loss.backward() + # copy fp16 grads in the model to fp32 params in the optimizer + fp32_weights = [] + for param_group in runner.optimizer.param_groups: + fp32_weights += param_group['params'] + self.copy_grads_to_fp32(runner.model, fp32_weights) + # allreduce grads + if self.distributed: + allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb) + # scale the gradients back + for param in fp32_weights: + if param.grad is not None: + param.grad.div_(self.loss_scale) + if self.grad_clip is not None: + self.clip_grads(fp32_weights) + # update fp32 params + runner.optimizer.step() + # copy fp32 params to the fp16 model + self.copy_params_to_fp16(runner.model, fp32_weights) + + +def wrap_fp16_model(model): + # convert model to fp16 + model.half() + # patch the normalization layers to make it work in fp32 mode + patch_norm_fp32(model) + # set `fp16_enabled` flag + for m in model.modules(): + if hasattr(m, 'fp16_enabled'): + m.fp16_enabled = True + + +def patch_norm_fp32(module): + if isinstance(module, (_BatchNorm, nn.GroupNorm)): + module.float() + module.forward = patch_forward_method(module.forward, torch.half, + torch.float) + for child in module.children(): + patch_norm_fp32(child) + return module + + +def patch_forward_method(func, src_type, dst_type, convert_output=True): + """Patch the forward method of a module. + + Args: + func (callable): The original forward method. + src_type (torch.dtype): Type of input arguments to be converted from. + dst_type (torch.dtype): Type of input arguments to be converted to. + convert_output (bool): Whether to convert the output back to src_type. + + Returns: + callable: The patched forward method. 
+ """ + + def new_forward(*args, **kwargs): + output = func(*cast_tensor_type(args, src_type, dst_type), + **cast_tensor_type(kwargs, src_type, dst_type)) + if convert_output: + output = cast_tensor_type(output, dst_type, src_type) + return output + + return new_forward diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/utils.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/utils.py new file mode 100644 index 0000000000..0d0297f946 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/fp16/utils.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import abc + +import numpy as np +import torch + + +def cast_tensor_type(inputs, src_type, dst_type): + if isinstance(inputs, torch.Tensor): + return inputs.to(dst_type) + elif isinstance(inputs, str): + return inputs + elif isinstance(inputs, np.ndarray): + return inputs + elif isinstance(inputs, abc.Mapping): + return type(inputs)({ + k: cast_tensor_type(v, src_type, dst_type) + for k, v in inputs.items() + }) + elif isinstance(inputs, abc.Iterable): + return type(inputs)( + cast_tensor_type(item, src_type, dst_type) for item in inputs) + else: + return inputs diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/__init__.py new file mode 100644 index 0000000000..078b7a902b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .dist_utils import DistOptimizerHook, allreduce_grads +from .misc import multi_apply + +__all__ = ['allreduce_grads', 'DistOptimizerHook', 'multi_apply'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/dist_utils.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/dist_utils.py new file mode 100644 index 0000000000..9470ee23e8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/dist_utils.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from collections import OrderedDict + +import torch.distributed as dist +from mmcv.runner import OptimizerHook +from torch._utils import (_flatten_dense_tensors, _take_tensors, + _unflatten_dense_tensors) + + +def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): + if bucket_size_mb > 0: + bucket_size_bytes = bucket_size_mb * 1024 * 1024 + buckets = _take_tensors(tensors, bucket_size_bytes) + else: + buckets = OrderedDict() + for tensor in tensors: + tp = tensor.type() + if tp not in buckets: + buckets[tp] = [] + buckets[tp].append(tensor) + buckets = buckets.values() + + for bucket in buckets: + flat_tensors = _flatten_dense_tensors(bucket) + dist.all_reduce(flat_tensors) + flat_tensors.div_(world_size) + for tensor, synced in zip( + bucket, _unflatten_dense_tensors(flat_tensors, bucket)): + tensor.copy_(synced) + + +def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): + grads = [ + param.grad.data for param in params + if param.requires_grad and param.grad is not None + ] + world_size = dist.get_world_size() + if coalesce: + _allreduce_coalesced(grads, world_size, bucket_size_mb) + else: + for tensor in grads: + dist.all_reduce(tensor.div_(world_size)) + + +class DistOptimizerHook(OptimizerHook): + + def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + + def after_train_iter(self, runner): + runner.optimizer.zero_grad() + runner.outputs['loss'].backward() + if self.grad_clip is not None: + self.clip_grads(runner.model.parameters()) + runner.optimizer.step() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/misc.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/misc.py new file mode 100644 index 0000000000..31f846377d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/utils/misc.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
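`allreduce_grads` and `DistOptimizerHook` need an initialized process group. A single-process sketch of the call pattern (gloo backend, world_size 1, placeholder address and port); in real training this runs across multiple ranks:

import os

import torch
import torch.distributed as dist
import torch.nn as nn

from mmcls.core.utils import allreduce_grads

os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
os.environ.setdefault('MASTER_PORT', '29500')
dist.init_process_group('gloo', rank=0, world_size=1)

model = nn.Linear(4, 2)
model(torch.randn(3, 4)).sum().backward()
# Sums gradients across ranks and divides by world_size (a no-op here).
allreduce_grads(list(model.parameters()), coalesce=True, bucket_size_mb=-1)
dist.destroy_process_group()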
+from functools import partial + + +def multi_apply(func, *args, **kwargs): + pfunc = partial(func, **kwargs) if kwargs else func + map_results = map(pfunc, *args) + return tuple(map(list, zip(*map_results))) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/__init__.py new file mode 100644 index 0000000000..481b8194b3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/__init__.py @@ -0,0 +1,7 @@ +from .image import (BaseFigureContextManager, ImshowInfosContextManager, + color_val_matplotlib, imshow_infos) + +__all__ = [ + 'BaseFigureContextManager', 'ImshowInfosContextManager', 'imshow_infos', + 'color_val_matplotlib' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/image.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/image.py new file mode 100644 index 0000000000..78146d048e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/core/visualization/image.py @@ -0,0 +1,326 @@ +from threading import Timer + +import matplotlib +import matplotlib.pyplot as plt +import mmcv +import numpy as np +from matplotlib.backend_bases import CloseEvent +from matplotlib.blocking_input import BlockingInput + +# A small value +EPS = 1e-2 + + +def color_val_matplotlib(color): + """Convert various input in BGR order to normalized RGB matplotlib color + tuples, + + Args: + color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Color inputs + + Returns: + tuple[float]: A tuple of 3 normalized floats indicating RGB channels. + """ + color = mmcv.color_val(color) + color = [color / 255 for color in color[::-1]] + return tuple(color) + + +class BaseFigureContextManager: + """Context Manager to reuse matplotlib figure. + + It provides a figure for saving and a figure for showing to support + different settings. + + Args: + axis (bool): Whether to show the axis lines. + fig_save_cfg (dict): Keyword parameters of figure for saving. + Defaults to empty dict. + fig_show_cfg (dict): Keyword parameters of figure for showing. + Defaults to empty dict. + """ + + def __init__(self, axis=False, fig_save_cfg={}, fig_show_cfg={}) -> None: + self.is_inline = 'inline' in matplotlib.get_backend() + + # Because save and show need different figure size + # We set two figure and axes to handle save and show + self.fig_save: plt.Figure = None + self.fig_save_cfg = fig_save_cfg + self.ax_save: plt.Axes = None + + self.fig_show: plt.Figure = None + self.fig_show_cfg = fig_show_cfg + self.ax_show: plt.Axes = None + self.blocking_input: BlockingInput = None + + self.axis = axis + + def __enter__(self): + if not self.is_inline: + # If use inline backend, we cannot control which figure to show, + # so disable the interactive fig_show, and put the initialization + # of fig_save to `prepare` function. + self._initialize_fig_save() + self._initialize_fig_show() + return self + + def _initialize_fig_save(self): + fig = plt.figure(**self.fig_save_cfg) + ax = fig.add_subplot() + + # remove white edges by set subplot margin + fig.subplots_adjust(left=0, right=1, bottom=0, top=1) + + self.fig_save, self.ax_save = fig, ax + + def _initialize_fig_show(self): + # fig_save will be resized to image size, only fig_show needs fig_size. 
+ fig = plt.figure(**self.fig_show_cfg) + ax = fig.add_subplot() + + # remove white edges by set subplot margin + fig.subplots_adjust(left=0, right=1, bottom=0, top=1) + + self.fig_show, self.ax_show = fig, ax + self.blocking_input = BlockingInput( + self.fig_show, eventslist=('key_press_event', 'close_event')) + + def __exit__(self, exc_type, exc_value, traceback): + if self.is_inline: + # If use inline backend, whether to close figure depends on if + # users want to show the image. + return + + plt.close(self.fig_save) + plt.close(self.fig_show) + + try: + # In matplotlib>=3.4.0, with TkAgg, plt.close will destroy + # window after idle, need to update manually. + # Refers to https://github.com/matplotlib/matplotlib/blob/v3.4.x/lib/matplotlib/backends/_backend_tk.py#L470 # noqa: E501 + self.fig_show.canvas.manager.window.update() + except AttributeError: + pass + + def prepare(self): + if self.is_inline: + # if use inline backend, just rebuild the fig_save. + self._initialize_fig_save() + self.ax_save.cla() + self.ax_save.axis(self.axis) + return + + # If users force to destroy the window, rebuild fig_show. + if not plt.fignum_exists(self.fig_show.number): + self._initialize_fig_show() + + # Clear all axes + self.ax_save.cla() + self.ax_save.axis(self.axis) + self.ax_show.cla() + self.ax_show.axis(self.axis) + + def wait_continue(self, timeout=0): + if self.is_inline: + # If use inline backend, interactive input and timeout is no use. + return + + # In matplotlib==3.4.x, with TkAgg, official timeout api of + # start_event_loop cannot work properly. Use a Timer to directly stop + # event loop. + if timeout > 0: + timer = Timer(timeout, self.fig_show.canvas.stop_event_loop) + timer.start() + while True: + # Disable matplotlib default hotkey to close figure. + with plt.rc_context({'keymap.quit': []}): + key_press = self.blocking_input(n=1, timeout=0) + + # Timeout or figure is closed or press space or press 'q' + if len(key_press) == 0 or isinstance( + key_press[0], + CloseEvent) or key_press[0].key in ['q', ' ']: + break + if timeout > 0: + timer.cancel() + + +class ImshowInfosContextManager(BaseFigureContextManager): + """Context Manager to reuse matplotlib figure and put infos on images. + + Args: + fig_size (tuple[int]): Size of the figure to show image. + + Examples: + >>> import mmcv + >>> from mmcls.core import visualization as vis + >>> img1 = mmcv.imread("./1.png") + >>> info1 = {'class': 'cat', 'label': 0} + >>> img2 = mmcv.imread("./2.png") + >>> info2 = {'class': 'dog', 'label': 1} + >>> with vis.ImshowInfosContextManager() as manager: + ... # Show img1 + ... manager.put_img_infos(img1, info1) + ... # Show img2 on the same figure and save output image. + ... manager.put_img_infos( + ... img2, info2, out_file='./2_out.png') + """ + + def __init__(self, fig_size=(15, 10)): + super().__init__( + axis=False, + # A proper dpi for image save with default font size. + fig_save_cfg=dict(frameon=False, dpi=36), + fig_show_cfg=dict(frameon=False, figsize=fig_size)) + + def _put_text(self, ax, text, x, y, text_color, font_size): + ax.text( + x, + y, + f'{text}', + bbox={ + 'facecolor': 'black', + 'alpha': 0.7, + 'pad': 0.2, + 'edgecolor': 'none', + 'boxstyle': 'round' + }, + color=text_color, + fontsize=font_size, + family='monospace', + verticalalignment='top', + horizontalalignment='left') + + def put_img_infos(self, + img, + infos, + text_color='white', + font_size=26, + row_width=20, + win_name='', + show=True, + wait_time=0, + out_file=None): + """Show image with extra information. 
+ + Args: + img (str | ndarray): The image to be displayed. + infos (dict): Extra infos to display in the image. + text_color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Extra infos + display color. Defaults to 'white'. + font_size (int): Extra infos display font size. Defaults to 26. + row_width (int): width between each row of results on the image. + win_name (str): The image title. Defaults to '' + show (bool): Whether to show the image. Defaults to True. + wait_time (int): How many seconds to display the image. + Defaults to 0. + out_file (Optional[str]): The filename to write the image. + Defaults to None. + + Returns: + np.ndarray: The image with extra infomations. + """ + self.prepare() + + text_color = color_val_matplotlib(text_color) + img = mmcv.imread(img).astype(np.uint8) + + x, y = 3, row_width // 2 + img = mmcv.bgr2rgb(img) + width, height = img.shape[1], img.shape[0] + img = np.ascontiguousarray(img) + + # add a small EPS to avoid precision lost due to matplotlib's + # truncation (https://github.com/matplotlib/matplotlib/issues/15363) + dpi = self.fig_save.get_dpi() + self.fig_save.set_size_inches((width + EPS) / dpi, + (height + EPS) / dpi) + + for k, v in infos.items(): + if isinstance(v, float): + v = f'{v:.2f}' + label_text = f'{k}: {v}' + self._put_text(self.ax_save, label_text, x, y, text_color, + font_size) + if show and not self.is_inline: + self._put_text(self.ax_show, label_text, x, y, text_color, + font_size) + y += row_width + + self.ax_save.imshow(img) + stream, _ = self.fig_save.canvas.print_to_buffer() + buffer = np.frombuffer(stream, dtype='uint8') + img_rgba = buffer.reshape(height, width, 4) + rgb, _ = np.split(img_rgba, [3], axis=2) + img_save = rgb.astype('uint8') + img_save = mmcv.rgb2bgr(img_save) + + if out_file is not None: + mmcv.imwrite(img_save, out_file) + + if show and not self.is_inline: + # Reserve some space for the tip. + self.ax_show.set_title(win_name) + self.ax_show.set_ylim(height + 20) + self.ax_show.text( + width // 2, + height + 18, + 'Press SPACE to continue.', + ha='center', + fontsize=font_size) + self.ax_show.imshow(img) + + # Refresh canvas, necessary for Qt5 backend. + self.fig_show.canvas.draw() + + self.wait_continue(timeout=wait_time) + elif (not show) and self.is_inline: + # If use inline backend, we use fig_save to show the image + # So we need to close it if users don't want to show. + plt.close(self.fig_save) + + return img_save + + +def imshow_infos(img, + infos, + text_color='white', + font_size=26, + row_width=20, + win_name='', + show=True, + fig_size=(15, 10), + wait_time=0, + out_file=None): + """Show image with extra information. + + Args: + img (str | ndarray): The image to be displayed. + infos (dict): Extra infos to display in the image. + text_color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Extra infos + display color. Defaults to 'white'. + font_size (int): Extra infos display font size. Defaults to 26. + row_width (int): width between each row of results on the image. + win_name (str): The image title. Defaults to '' + show (bool): Whether to show the image. Defaults to True. + fig_size (tuple): Image show figure size. Defaults to (15, 10). + wait_time (int): How many seconds to display the image. Defaults to 0. + out_file (Optional[str]): The filename to write the image. + Defaults to None. + + Returns: + np.ndarray: The image with extra infomations. 
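A usage sketch of `imshow_infos` with a random image, writing to a placeholder path and skipping the interactive window (`show=False`):

import numpy as np

from mmcls.core.visualization import imshow_infos

img = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
out = imshow_infos(
    img,
    {'pred_class': 'cat', 'pred_score': 0.87},
    show=False,
    out_file='demo_infos.jpg')  # placeholder output path
print(out.shape)                # annotated image returned as an ndarray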
+ """ + with ImshowInfosContextManager(fig_size=fig_size) as manager: + img = manager.put_img_infos( + img, + infos, + text_color=text_color, + font_size=font_size, + row_width=row_width, + win_name=win_name, + show=show, + wait_time=wait_time, + out_file=out_file) + return img diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/__init__.py new file mode 100644 index 0000000000..6e15ed682f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_dataset import BaseDataset +from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset +from .cifar import CIFAR10, CIFAR100 +from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, + RepeatDataset) +from .imagenet import ImageNet +from .imagenet21k import ImageNet21k +from .mnist import MNIST, FashionMNIST +from .multi_label import MultiLabelDataset +from .samplers import DistributedSampler +from .voc import VOC + +__all__ = [ + 'BaseDataset', 'ImageNet', 'CIFAR10', 'CIFAR100', 'MNIST', 'FashionMNIST', + 'VOC', 'MultiLabelDataset', 'build_dataloader', 'build_dataset', + 'DistributedSampler', 'ConcatDataset', 'RepeatDataset', + 'ClassBalancedDataset', 'DATASETS', 'PIPELINES', 'ImageNet21k' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/base_dataset.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/base_dataset.py new file mode 100644 index 0000000000..b271cca113 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/base_dataset.py @@ -0,0 +1,206 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from abc import ABCMeta, abstractmethod + +import mmcv +import numpy as np +from torch.utils.data import Dataset + +from mmcls.core.evaluation import precision_recall_f1, support +from mmcls.models.losses import accuracy +from .pipelines import Compose + + +class BaseDataset(Dataset, metaclass=ABCMeta): + """Base dataset. + + Args: + data_prefix (str): the prefix of data path + pipeline (list): a list of dict, where each element represents + a operation defined in `mmcls.datasets.pipelines` + ann_file (str | None): the annotation file. When ann_file is str, + the subclass is expected to read from the ann_file. When ann_file + is None, the subclass is expected to read according to data_prefix + test_mode (bool): in train mode or test mode + """ + + CLASSES = None + + def __init__(self, + data_prefix, + pipeline, + classes=None, + ann_file=None, + test_mode=False): + super(BaseDataset, self).__init__() + self.ann_file = ann_file + self.data_prefix = data_prefix + self.test_mode = test_mode + self.pipeline = Compose(pipeline) + self.CLASSES = self.get_classes(classes) + self.data_infos = self.load_annotations() + + @abstractmethod + def load_annotations(self): + pass + + @property + def class_to_idx(self): + """Map mapping class name to class index. + + Returns: + dict: mapping from class name to class index. + """ + + return {_class: i for i, _class in enumerate(self.CLASSES)} + + def get_gt_labels(self): + """Get all ground-truth labels (categories). + + Returns: + list[int]: categories for all images. + """ + + gt_labels = np.array([data['gt_label'] for data in self.data_infos]) + return gt_labels + + def get_cat_ids(self, idx): + """Get category id by index. 
+ + Args: + idx (int): Index of data. + + Returns: + int: Image category of specified index. + """ + + return self.data_infos[idx]['gt_label'].astype(np.int) + + def prepare_data(self, idx): + results = copy.deepcopy(self.data_infos[idx]) + return self.pipeline(results) + + def __len__(self): + return len(self.data_infos) + + def __getitem__(self, idx): + return self.prepare_data(idx) + + @classmethod + def get_classes(cls, classes=None): + """Get class names of current dataset. + + Args: + classes (Sequence[str] | str | None): If classes is None, use + default CLASSES defined by builtin dataset. If classes is a + string, take it as a file name. The file contains the name of + classes where each line contains one class name. If classes is + a tuple or list, override the CLASSES defined by the dataset. + + Returns: + tuple[str] or list[str]: Names of categories of the dataset. + """ + if classes is None: + return cls.CLASSES + + if isinstance(classes, str): + # take it as a file path + class_names = mmcv.list_from_file(classes) + elif isinstance(classes, (tuple, list)): + class_names = classes + else: + raise ValueError(f'Unsupported type {type(classes)} of classes.') + + return class_names + + def evaluate(self, + results, + metric='accuracy', + metric_options=None, + logger=None): + """Evaluate the dataset. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + Default value is `accuracy`. + metric_options (dict, optional): Options for calculating metrics. + Allowed keys are 'topk', 'thrs' and 'average_mode'. + Defaults to None. + logger (logging.Logger | str, optional): Logger used for printing + related information during evaluation. Defaults to None. + Returns: + dict: evaluation results + """ + if metric_options is None: + metric_options = {'topk': (1, 5)} + if isinstance(metric, str): + metrics = [metric] + else: + metrics = metric + allowed_metrics = [ + 'accuracy', 'precision', 'recall', 'f1_score', 'support' + ] + eval_results = {} + results = np.vstack(results) + gt_labels = self.get_gt_labels() + num_imgs = len(results) + assert len(gt_labels) == num_imgs, 'dataset testing results should '\ + 'be of the same length as gt_labels.' 
+ + invalid_metrics = set(metrics) - set(allowed_metrics) + if len(invalid_metrics) != 0: + raise ValueError(f'metric {invalid_metrics} is not supported.') + + topk = metric_options.get('topk', (1, 5)) + thrs = metric_options.get('thrs') + average_mode = metric_options.get('average_mode', 'macro') + + if 'accuracy' in metrics: + if thrs is not None: + acc = accuracy(results, gt_labels, topk=topk, thrs=thrs) + else: + acc = accuracy(results, gt_labels, topk=topk) + if isinstance(topk, tuple): + eval_results_ = { + f'accuracy_top-{k}': a + for k, a in zip(topk, acc) + } + else: + eval_results_ = {'accuracy': acc} + if isinstance(thrs, tuple): + for key, values in eval_results_.items(): + eval_results.update({ + f'{key}_thr_{thr:.2f}': value.item() + for thr, value in zip(thrs, values) + }) + else: + eval_results.update( + {k: v.item() + for k, v in eval_results_.items()}) + + if 'support' in metrics: + support_value = support( + results, gt_labels, average_mode=average_mode) + eval_results['support'] = support_value + + precision_recall_f1_keys = ['precision', 'recall', 'f1_score'] + if len(set(metrics) & set(precision_recall_f1_keys)) != 0: + if thrs is not None: + precision_recall_f1_values = precision_recall_f1( + results, gt_labels, average_mode=average_mode, thrs=thrs) + else: + precision_recall_f1_values = precision_recall_f1( + results, gt_labels, average_mode=average_mode) + for key, values in zip(precision_recall_f1_keys, + precision_recall_f1_values): + if key in metrics: + if isinstance(thrs, tuple): + eval_results.update({ + f'{key}_thr_{thr:.2f}': value + for thr, value in zip(thrs, values) + }) + else: + eval_results[key] = values + + return eval_results diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/builder.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/builder.py new file mode 100644 index 0000000000..5662f91d9e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/builder.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
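A minimal `BaseDataset` subclass sketch (class name, labels and data_prefix are made up) showing the kind of records `load_annotations` is expected to return:

import numpy as np

from mmcls.datasets.base_dataset import BaseDataset


class ToyDataset(BaseDataset):
    """Toy subclass: two classes, four fake records."""

    CLASSES = ['cat', 'dog']

    def load_annotations(self):
        return [
            dict(img_info=dict(filename=f'{i}.jpg'),
                 gt_label=np.array(i % 2, dtype=np.int64))
            for i in range(4)
        ]


ds = ToyDataset(data_prefix='data/toy', pipeline=[])
print(len(ds), ds.class_to_idx)  # 4 {'cat': 0, 'dog': 1}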
+import platform +import random +from functools import partial + +import numpy as np +import torch +from mmcv.parallel import collate +from mmcv.runner import get_dist_info +from mmcv.utils import Registry, build_from_cfg, digit_version +from torch.utils.data import DataLoader + +from .samplers import DistributedSampler + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + hard_limit = rlimit[1] + soft_limit = min(4096, hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + +DATASETS = Registry('dataset') +PIPELINES = Registry('pipeline') + + +def build_dataset(cfg, default_args=None): + from .dataset_wrappers import (ConcatDataset, RepeatDataset, + ClassBalancedDataset) + if isinstance(cfg, (list, tuple)): + dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) + elif cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + build_dataset(cfg['dataset'], default_args), cfg['times']) + elif cfg['type'] == 'ClassBalancedDataset': + dataset = ClassBalancedDataset( + build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + + return dataset + + +def build_dataloader(dataset, + samples_per_gpu, + workers_per_gpu, + num_gpus=1, + dist=True, + shuffle=True, + round_up=True, + seed=None, + pin_memory=True, + persistent_workers=True, + **kwargs): + """Build PyTorch DataLoader. + + In distributed training, each GPU/process has a dataloader. + In non-distributed training, there is only one dataloader for all GPUs. + + Args: + dataset (Dataset): A PyTorch dataset. + samples_per_gpu (int): Number of training samples on each GPU, i.e., + batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data loading + for each GPU. + num_gpus (int): Number of GPUs. Only used in non-distributed training. + dist (bool): Distributed training/test or not. Default: True. + shuffle (bool): Whether to shuffle the data at every epoch. + Default: True. + round_up (bool): Whether to round up the length of dataset by adding + extra samples to make it evenly divisible. Default: True. + pin_memory (bool): Whether to use pin_memory in DataLoader. + Default: True + persistent_workers (bool): If True, the data loader will not shutdown + the worker processes after a dataset has been consumed once. + This allows to maintain the workers Dataset instances alive. + The argument also has effect in PyTorch>=1.7.0. + Default: True + kwargs: any keyword argument to be used to initialize DataLoader + + Returns: + DataLoader: A PyTorch dataloader. 
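A sketch of how the two builders combine, using the CIFAR-10 dataset registered later in this patch and a placeholder data_prefix (the files are downloaded there on first use):

from mmcls.datasets import build_dataloader, build_dataset

dataset = build_dataset(
    dict(type='CIFAR10', data_prefix='data/cifar10', pipeline=[]))
loader = build_dataloader(
    dataset,
    samples_per_gpu=32,
    workers_per_gpu=2,
    num_gpus=1,
    dist=False,
    shuffle=True)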
+ """ + rank, world_size = get_dist_info() + if dist: + sampler = DistributedSampler( + dataset, world_size, rank, shuffle=shuffle, round_up=round_up) + shuffle = False + batch_size = samples_per_gpu + num_workers = workers_per_gpu + else: + sampler = None + batch_size = num_gpus * samples_per_gpu + num_workers = num_gpus * workers_per_gpu + + init_fn = partial( + worker_init_fn, num_workers=num_workers, rank=rank, + seed=seed) if seed is not None else None + + if digit_version(torch.__version__) >= digit_version('1.8.0'): + kwargs['persistent_workers'] = persistent_workers + + data_loader = DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + num_workers=num_workers, + collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), + pin_memory=pin_memory, + shuffle=shuffle, + worker_init_fn=init_fn, + **kwargs) + + return data_loader + + +def worker_init_fn(worker_id, num_workers, rank, seed): + # The seed of each worker equals to + # num_worker * rank + worker_id + user_seed + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/cifar.py new file mode 100644 index 0000000000..31440247b8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/cifar.py @@ -0,0 +1,133 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path +import pickle + +import numpy as np +import torch.distributed as dist +from mmcv.runner import get_dist_info + +from .base_dataset import BaseDataset +from .builder import DATASETS +from .utils import check_integrity, download_and_extract_archive + + +@DATASETS.register_module() +class CIFAR10(BaseDataset): + """`CIFAR10 `_ Dataset. + + This implementation is modified from + https://github.com/pytorch/vision/blob/master/torchvision/datasets/cifar.py + """ # noqa: E501 + + base_folder = 'cifar-10-batches-py' + url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' + filename = 'cifar-10-python.tar.gz' + tgz_md5 = 'c58f30108f718f92721af3b95e74349a' + train_list = [ + ['data_batch_1', 'c99cafc152244af753f735de768cd75f'], + ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], + ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], + ['data_batch_4', '634d18415352ddfa80567beed471001a'], + ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'], + ] + + test_list = [ + ['test_batch', '40351d587109b95175f43aff81a1287e'], + ] + meta = { + 'filename': 'batches.meta', + 'key': 'label_names', + 'md5': '5ff9c542aee3614f3951f8cda6e48888', + } + + def load_annotations(self): + + rank, world_size = get_dist_info() + + if rank == 0 and not self._check_integrity(): + download_and_extract_archive( + self.url, + self.data_prefix, + filename=self.filename, + md5=self.tgz_md5) + + if world_size > 1: + dist.barrier() + assert self._check_integrity(), \ + 'Shared storage seems unavailable. ' \ + f'Please download the dataset manually through {self.url}.' 
+ + if not self.test_mode: + downloaded_list = self.train_list + else: + downloaded_list = self.test_list + + self.imgs = [] + self.gt_labels = [] + + # load the picked numpy arrays + for file_name, checksum in downloaded_list: + file_path = os.path.join(self.data_prefix, self.base_folder, + file_name) + with open(file_path, 'rb') as f: + entry = pickle.load(f, encoding='latin1') + self.imgs.append(entry['data']) + if 'labels' in entry: + self.gt_labels.extend(entry['labels']) + else: + self.gt_labels.extend(entry['fine_labels']) + + self.imgs = np.vstack(self.imgs).reshape(-1, 3, 32, 32) + self.imgs = self.imgs.transpose((0, 2, 3, 1)) # convert to HWC + + self._load_meta() + + data_infos = [] + for img, gt_label in zip(self.imgs, self.gt_labels): + gt_label = np.array(gt_label, dtype=np.int64) + info = {'img': img, 'gt_label': gt_label} + data_infos.append(info) + return data_infos + + def _load_meta(self): + path = os.path.join(self.data_prefix, self.base_folder, + self.meta['filename']) + if not check_integrity(path, self.meta['md5']): + raise RuntimeError( + 'Dataset metadata file not found or corrupted.' + + ' You can use download=True to download it') + with open(path, 'rb') as infile: + data = pickle.load(infile, encoding='latin1') + self.CLASSES = data[self.meta['key']] + + def _check_integrity(self): + root = self.data_prefix + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = os.path.join(root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + +@DATASETS.register_module() +class CIFAR100(CIFAR10): + """`CIFAR100 `_ Dataset.""" + + base_folder = 'cifar-100-python' + url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' + filename = 'cifar-100-python.tar.gz' + tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' + train_list = [ + ['train', '16019d7e3df5f24257cddd939b257f8d'], + ] + + test_list = [ + ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'], + ] + meta = { + 'filename': 'meta', + 'key': 'fine_label_names', + 'md5': '7973b15100ade9c7d40fb424638fde48', + } diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/dataset_wrappers.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/dataset_wrappers.py new file mode 100644 index 0000000000..68c234e2f2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/dataset_wrappers.py @@ -0,0 +1,172 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import bisect +import math +from collections import defaultdict + +import numpy as np +from torch.utils.data.dataset import ConcatDataset as _ConcatDataset + +from .builder import DATASETS + + +@DATASETS.register_module() +class ConcatDataset(_ConcatDataset): + """A wrapper of concatenated dataset. + + Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but + add `get_cat_ids` function. + + Args: + datasets (list[:obj:`Dataset`]): A list of datasets. 
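Passing a list of configs to `build_dataset` produces the `ConcatDataset` wrapper documented here; the sketch below concatenates the two CIFAR splits defined above purely to illustrate the mechanics (placeholder paths):

from mmcls.datasets import build_dataset

train_set = build_dataset([
    dict(type='CIFAR10', data_prefix='data/cifar10', pipeline=[]),
    dict(type='CIFAR100', data_prefix='data/cifar100', pipeline=[]),
])
print(len(train_set))  # 50000 + 50000 training images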
+ """ + + def __init__(self, datasets): + super(ConcatDataset, self).__init__(datasets) + self.CLASSES = datasets[0].CLASSES + + def get_cat_ids(self, idx): + if idx < 0: + if -idx > len(self): + raise ValueError( + 'absolute value of index should not exceed dataset length') + idx = len(self) + idx + dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if dataset_idx == 0: + sample_idx = idx + else: + sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] + return self.datasets[dataset_idx].get_cat_ids(sample_idx) + + +@DATASETS.register_module() +class RepeatDataset(object): + """A wrapper of repeated dataset. + + The length of repeated dataset will be `times` larger than the original + dataset. This is useful when the data loading time is long but the dataset + is small. Using RepeatDataset can reduce the data loading time between + epochs. + + Args: + dataset (:obj:`Dataset`): The dataset to be repeated. + times (int): Repeat times. + """ + + def __init__(self, dataset, times): + self.dataset = dataset + self.times = times + self.CLASSES = dataset.CLASSES + + self._ori_len = len(self.dataset) + + def __getitem__(self, idx): + return self.dataset[idx % self._ori_len] + + def get_cat_ids(self, idx): + return self.dataset.get_cat_ids(idx % self._ori_len) + + def __len__(self): + return self.times * self._ori_len + + +# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa +@DATASETS.register_module() +class ClassBalancedDataset(object): + r"""A wrapper of repeated dataset with repeat factor. + + Suitable for training on class imbalanced datasets like LVIS. Following + the sampling strategy in [#1]_, in each epoch, an image may appear multiple + times based on its "repeat factor". + + The repeat factor for an image is a function of the frequency the rarest + category labeled in that image. The "frequency of category c" in [0, 1] + is defined by the fraction of images in the training set (without repeats) + in which category c appears. + + The dataset needs to implement :func:`self.get_cat_ids` to support + ClassBalancedDataset. + + The repeat factor is computed as followed. + + 1. For each category c, compute the fraction :math:`f(c)` of images that + contain it. + 2. For each category c, compute the category-level repeat factor + + .. math:: + r(c) = \max(1, \sqrt{\frac{t}{f(c)}}) + + 3. For each image I and its labels :math:`L(I)`, compute the image-level + repeat factor + + .. math:: + r(I) = \max_{c \in L(I)} r(c) + + References: + .. [#1] https://arxiv.org/pdf/1908.03195.pdf + + Args: + dataset (:obj:`CustomDataset`): The dataset to be repeated. + oversample_thr (float): frequency threshold below which data is + repeated. For categories with `f_c` >= `oversample_thr`, there is + no oversampling. For categories with `f_c` < `oversample_thr`, the + degree of oversampling following the square-root inverse frequency + heuristic above. 
+ """ + + def __init__(self, dataset, oversample_thr): + self.dataset = dataset + self.oversample_thr = oversample_thr + self.CLASSES = dataset.CLASSES + + repeat_factors = self._get_repeat_factors(dataset, oversample_thr) + repeat_indices = [] + for dataset_index, repeat_factor in enumerate(repeat_factors): + repeat_indices.extend([dataset_index] * math.ceil(repeat_factor)) + self.repeat_indices = repeat_indices + + flags = [] + if hasattr(self.dataset, 'flag'): + for flag, repeat_factor in zip(self.dataset.flag, repeat_factors): + flags.extend([flag] * int(math.ceil(repeat_factor))) + assert len(flags) == len(repeat_indices) + self.flag = np.asarray(flags, dtype=np.uint8) + + def _get_repeat_factors(self, dataset, repeat_thr): + # 1. For each category c, compute the fraction # of images + # that contain it: f(c) + category_freq = defaultdict(int) + num_images = len(dataset) + for idx in range(num_images): + cat_ids = set(self.dataset.get_cat_ids(idx)) + for cat_id in cat_ids: + category_freq[cat_id] += 1 + for k, v in category_freq.items(): + assert v > 0, f'caterogy {k} does not contain any images' + category_freq[k] = v / num_images + + # 2. For each category c, compute the category-level repeat factor: + # r(c) = max(1, sqrt(t/f(c))) + category_repeat = { + cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq)) + for cat_id, cat_freq in category_freq.items() + } + + # 3. For each image I and its labels L(I), compute the image-level + # repeat factor: + # r(I) = max_{c in L(I)} r(c) + repeat_factors = [] + for idx in range(num_images): + cat_ids = set(self.dataset.get_cat_ids(idx)) + repeat_factor = max( + {category_repeat[cat_id] + for cat_id in cat_ids}) + repeat_factors.append(repeat_factor) + + return repeat_factors + + def __getitem__(self, idx): + ori_index = self.repeat_indices[idx] + return self.dataset[ori_index] + + def __len__(self): + return len(self.repeat_indices) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet.py new file mode 100644 index 0000000000..9bfd31b079 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet.py @@ -0,0 +1,1103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os + +import numpy as np + +from .base_dataset import BaseDataset +from .builder import DATASETS + + +def has_file_allowed_extension(filename, extensions): + """Checks if a file is an allowed extension. + + Args: + filename (string): path to a file + + Returns: + bool: True if the filename ends with a known image extension + """ + filename_lower = filename.lower() + return any(filename_lower.endswith(ext) for ext in extensions) + + +def find_folders(root): + """Find classes by folders under a root. + + Args: + root (string): root directory of folders + + Returns: + folder_to_idx (dict): the map from folder name to class idx + """ + folders = [ + d for d in os.listdir(root) if os.path.isdir(os.path.join(root, d)) + ] + folders.sort() + folder_to_idx = {folders[i]: i for i in range(len(folders))} + return folder_to_idx + + +def get_samples(root, folder_to_idx, extensions): + """Make dataset by walking all images under a root. 
+ + Args: + root (string): root directory of folders + folder_to_idx (dict): the map from class name to class idx + extensions (tuple): allowed extensions + + Returns: + samples (list): a list of tuple where each element is (image, label) + """ + samples = [] + root = os.path.expanduser(root) + for folder_name in sorted(list(folder_to_idx.keys())): + _dir = os.path.join(root, folder_name) + for _, _, fns in sorted(os.walk(_dir)): + for fn in sorted(fns): + if has_file_allowed_extension(fn, extensions): + path = os.path.join(folder_name, fn) + item = (path, folder_to_idx[folder_name]) + samples.append(item) + return samples + + +@DATASETS.register_module() +class ImageNet(BaseDataset): + """`ImageNet `_ Dataset. + + This implementation is modified from + https://github.com/pytorch/vision/blob/master/torchvision/datasets/imagenet.py + """ # noqa: E501 + + IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif') + CLASSES = [ + 'tench, Tinca tinca', + 'goldfish, Carassius auratus', + 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', # noqa: E501 + 'tiger shark, Galeocerdo cuvieri', + 'hammerhead, hammerhead shark', + 'electric ray, crampfish, numbfish, torpedo', + 'stingray', + 'cock', + 'hen', + 'ostrich, Struthio camelus', + 'brambling, Fringilla montifringilla', + 'goldfinch, Carduelis carduelis', + 'house finch, linnet, Carpodacus mexicanus', + 'junco, snowbird', + 'indigo bunting, indigo finch, indigo bird, Passerina cyanea', + 'robin, American robin, Turdus migratorius', + 'bulbul', + 'jay', + 'magpie', + 'chickadee', + 'water ouzel, dipper', + 'kite', + 'bald eagle, American eagle, Haliaeetus leucocephalus', + 'vulture', + 'great grey owl, great gray owl, Strix nebulosa', + 'European fire salamander, Salamandra salamandra', + 'common newt, Triturus vulgaris', + 'eft', + 'spotted salamander, Ambystoma maculatum', + 'axolotl, mud puppy, Ambystoma mexicanum', + 'bullfrog, Rana catesbeiana', + 'tree frog, tree-frog', + 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui', + 'loggerhead, loggerhead turtle, Caretta caretta', + 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea', # noqa: E501 + 'mud turtle', + 'terrapin', + 'box turtle, box tortoise', + 'banded gecko', + 'common iguana, iguana, Iguana iguana', + 'American chameleon, anole, Anolis carolinensis', + 'whiptail, whiptail lizard', + 'agama', + 'frilled lizard, Chlamydosaurus kingi', + 'alligator lizard', + 'Gila monster, Heloderma suspectum', + 'green lizard, Lacerta viridis', + 'African chameleon, Chamaeleo chamaeleon', + 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis', # noqa: E501 + 'African crocodile, Nile crocodile, Crocodylus niloticus', + 'American alligator, Alligator mississipiensis', + 'triceratops', + 'thunder snake, worm snake, Carphophis amoenus', + 'ringneck snake, ring-necked snake, ring snake', + 'hognose snake, puff adder, sand viper', + 'green snake, grass snake', + 'king snake, kingsnake', + 'garter snake, grass snake', + 'water snake', + 'vine snake', + 'night snake, Hypsiglena torquata', + 'boa constrictor, Constrictor constrictor', + 'rock python, rock snake, Python sebae', + 'Indian cobra, Naja naja', + 'green mamba', + 'sea snake', + 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus', + 'diamondback, diamondback rattlesnake, Crotalus adamanteus', + 'sidewinder, horned rattlesnake, Crotalus cerastes', + 'trilobite', + 'harvestman, daddy longlegs, Phalangium opilio', + 
'scorpion', + 'black and gold garden spider, Argiope aurantia', + 'barn spider, Araneus cavaticus', + 'garden spider, Aranea diademata', + 'black widow, Latrodectus mactans', + 'tarantula', + 'wolf spider, hunting spider', + 'tick', + 'centipede', + 'black grouse', + 'ptarmigan', + 'ruffed grouse, partridge, Bonasa umbellus', + 'prairie chicken, prairie grouse, prairie fowl', + 'peacock', + 'quail', + 'partridge', + 'African grey, African gray, Psittacus erithacus', + 'macaw', + 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita', + 'lorikeet', + 'coucal', + 'bee eater', + 'hornbill', + 'hummingbird', + 'jacamar', + 'toucan', + 'drake', + 'red-breasted merganser, Mergus serrator', + 'goose', + 'black swan, Cygnus atratus', + 'tusker', + 'echidna, spiny anteater, anteater', + 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus', # noqa: E501 + 'wallaby, brush kangaroo', + 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus', # noqa: E501 + 'wombat', + 'jellyfish', + 'sea anemone, anemone', + 'brain coral', + 'flatworm, platyhelminth', + 'nematode, nematode worm, roundworm', + 'conch', + 'snail', + 'slug', + 'sea slug, nudibranch', + 'chiton, coat-of-mail shell, sea cradle, polyplacophore', + 'chambered nautilus, pearly nautilus, nautilus', + 'Dungeness crab, Cancer magister', + 'rock crab, Cancer irroratus', + 'fiddler crab', + 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica', # noqa: E501 + 'American lobster, Northern lobster, Maine lobster, Homarus americanus', # noqa: E501 + 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish', # noqa: E501 + 'crayfish, crawfish, crawdad, crawdaddy', + 'hermit crab', + 'isopod', + 'white stork, Ciconia ciconia', + 'black stork, Ciconia nigra', + 'spoonbill', + 'flamingo', + 'little blue heron, Egretta caerulea', + 'American egret, great white heron, Egretta albus', + 'bittern', + 'crane', + 'limpkin, Aramus pictus', + 'European gallinule, Porphyrio porphyrio', + 'American coot, marsh hen, mud hen, water hen, Fulica americana', + 'bustard', + 'ruddy turnstone, Arenaria interpres', + 'red-backed sandpiper, dunlin, Erolia alpina', + 'redshank, Tringa totanus', + 'dowitcher', + 'oystercatcher, oyster catcher', + 'pelican', + 'king penguin, Aptenodytes patagonica', + 'albatross, mollymawk', + 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus', # noqa: E501 + 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca', + 'dugong, Dugong dugon', + 'sea lion', + 'Chihuahua', + 'Japanese spaniel', + 'Maltese dog, Maltese terrier, Maltese', + 'Pekinese, Pekingese, Peke', + 'Shih-Tzu', + 'Blenheim spaniel', + 'papillon', + 'toy terrier', + 'Rhodesian ridgeback', + 'Afghan hound, Afghan', + 'basset, basset hound', + 'beagle', + 'bloodhound, sleuthhound', + 'bluetick', + 'black-and-tan coonhound', + 'Walker hound, Walker foxhound', + 'English foxhound', + 'redbone', + 'borzoi, Russian wolfhound', + 'Irish wolfhound', + 'Italian greyhound', + 'whippet', + 'Ibizan hound, Ibizan Podenco', + 'Norwegian elkhound, elkhound', + 'otterhound, otter hound', + 'Saluki, gazelle hound', + 'Scottish deerhound, deerhound', + 'Weimaraner', + 'Staffordshire bullterrier, Staffordshire bull terrier', + 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier', # noqa: E501 + 'Bedlington terrier', + 'Border terrier', + 'Kerry blue terrier', + 'Irish terrier', + 'Norfolk terrier', + 
'Norwich terrier', + 'Yorkshire terrier', + 'wire-haired fox terrier', + 'Lakeland terrier', + 'Sealyham terrier, Sealyham', + 'Airedale, Airedale terrier', + 'cairn, cairn terrier', + 'Australian terrier', + 'Dandie Dinmont, Dandie Dinmont terrier', + 'Boston bull, Boston terrier', + 'miniature schnauzer', + 'giant schnauzer', + 'standard schnauzer', + 'Scotch terrier, Scottish terrier, Scottie', + 'Tibetan terrier, chrysanthemum dog', + 'silky terrier, Sydney silky', + 'soft-coated wheaten terrier', + 'West Highland white terrier', + 'Lhasa, Lhasa apso', + 'flat-coated retriever', + 'curly-coated retriever', + 'golden retriever', + 'Labrador retriever', + 'Chesapeake Bay retriever', + 'German short-haired pointer', + 'vizsla, Hungarian pointer', + 'English setter', + 'Irish setter, red setter', + 'Gordon setter', + 'Brittany spaniel', + 'clumber, clumber spaniel', + 'English springer, English springer spaniel', + 'Welsh springer spaniel', + 'cocker spaniel, English cocker spaniel, cocker', + 'Sussex spaniel', + 'Irish water spaniel', + 'kuvasz', + 'schipperke', + 'groenendael', + 'malinois', + 'briard', + 'kelpie', + 'komondor', + 'Old English sheepdog, bobtail', + 'Shetland sheepdog, Shetland sheep dog, Shetland', + 'collie', + 'Border collie', + 'Bouvier des Flandres, Bouviers des Flandres', + 'Rottweiler', + 'German shepherd, German shepherd dog, German police dog, alsatian', + 'Doberman, Doberman pinscher', + 'miniature pinscher', + 'Greater Swiss Mountain dog', + 'Bernese mountain dog', + 'Appenzeller', + 'EntleBucher', + 'boxer', + 'bull mastiff', + 'Tibetan mastiff', + 'French bulldog', + 'Great Dane', + 'Saint Bernard, St Bernard', + 'Eskimo dog, husky', + 'malamute, malemute, Alaskan malamute', + 'Siberian husky', + 'dalmatian, coach dog, carriage dog', + 'affenpinscher, monkey pinscher, monkey dog', + 'basenji', + 'pug, pug-dog', + 'Leonberg', + 'Newfoundland, Newfoundland dog', + 'Great Pyrenees', + 'Samoyed, Samoyede', + 'Pomeranian', + 'chow, chow chow', + 'keeshond', + 'Brabancon griffon', + 'Pembroke, Pembroke Welsh corgi', + 'Cardigan, Cardigan Welsh corgi', + 'toy poodle', + 'miniature poodle', + 'standard poodle', + 'Mexican hairless', + 'timber wolf, grey wolf, gray wolf, Canis lupus', + 'white wolf, Arctic wolf, Canis lupus tundrarum', + 'red wolf, maned wolf, Canis rufus, Canis niger', + 'coyote, prairie wolf, brush wolf, Canis latrans', + 'dingo, warrigal, warragal, Canis dingo', + 'dhole, Cuon alpinus', + 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus', + 'hyena, hyaena', + 'red fox, Vulpes vulpes', + 'kit fox, Vulpes macrotis', + 'Arctic fox, white fox, Alopex lagopus', + 'grey fox, gray fox, Urocyon cinereoargenteus', + 'tabby, tabby cat', + 'tiger cat', + 'Persian cat', + 'Siamese cat, Siamese', + 'Egyptian cat', + 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', # noqa: E501 + 'lynx, catamount', + 'leopard, Panthera pardus', + 'snow leopard, ounce, Panthera uncia', + 'jaguar, panther, Panthera onca, Felis onca', + 'lion, king of beasts, Panthera leo', + 'tiger, Panthera tigris', + 'cheetah, chetah, Acinonyx jubatus', + 'brown bear, bruin, Ursus arctos', + 'American black bear, black bear, Ursus americanus, Euarctos americanus', # noqa: E501 + 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus', + 'sloth bear, Melursus ursinus, Ursus ursinus', + 'mongoose', + 'meerkat, mierkat', + 'tiger beetle', + 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle', + 'ground beetle, carabid beetle', + 
'long-horned beetle, longicorn, longicorn beetle', + 'leaf beetle, chrysomelid', + 'dung beetle', + 'rhinoceros beetle', + 'weevil', + 'fly', + 'bee', + 'ant, emmet, pismire', + 'grasshopper, hopper', + 'cricket', + 'walking stick, walkingstick, stick insect', + 'cockroach, roach', + 'mantis, mantid', + 'cicada, cicala', + 'leafhopper', + 'lacewing, lacewing fly', + "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", # noqa: E501 + 'damselfly', + 'admiral', + 'ringlet, ringlet butterfly', + 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus', + 'cabbage butterfly', + 'sulphur butterfly, sulfur butterfly', + 'lycaenid, lycaenid butterfly', + 'starfish, sea star', + 'sea urchin', + 'sea cucumber, holothurian', + 'wood rabbit, cottontail, cottontail rabbit', + 'hare', + 'Angora, Angora rabbit', + 'hamster', + 'porcupine, hedgehog', + 'fox squirrel, eastern fox squirrel, Sciurus niger', + 'marmot', + 'beaver', + 'guinea pig, Cavia cobaya', + 'sorrel', + 'zebra', + 'hog, pig, grunter, squealer, Sus scrofa', + 'wild boar, boar, Sus scrofa', + 'warthog', + 'hippopotamus, hippo, river horse, Hippopotamus amphibius', + 'ox', + 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis', + 'bison', + 'ram, tup', + 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis', # noqa: E501 + 'ibex, Capra ibex', + 'hartebeest', + 'impala, Aepyceros melampus', + 'gazelle', + 'Arabian camel, dromedary, Camelus dromedarius', + 'llama', + 'weasel', + 'mink', + 'polecat, fitch, foulmart, foumart, Mustela putorius', + 'black-footed ferret, ferret, Mustela nigripes', + 'otter', + 'skunk, polecat, wood pussy', + 'badger', + 'armadillo', + 'three-toed sloth, ai, Bradypus tridactylus', + 'orangutan, orang, orangutang, Pongo pygmaeus', + 'gorilla, Gorilla gorilla', + 'chimpanzee, chimp, Pan troglodytes', + 'gibbon, Hylobates lar', + 'siamang, Hylobates syndactylus, Symphalangus syndactylus', + 'guenon, guenon monkey', + 'patas, hussar monkey, Erythrocebus patas', + 'baboon', + 'macaque', + 'langur', + 'colobus, colobus monkey', + 'proboscis monkey, Nasalis larvatus', + 'marmoset', + 'capuchin, ringtail, Cebus capucinus', + 'howler monkey, howler', + 'titi, titi monkey', + 'spider monkey, Ateles geoffroyi', + 'squirrel monkey, Saimiri sciureus', + 'Madagascar cat, ring-tailed lemur, Lemur catta', + 'indri, indris, Indri indri, Indri brevicaudatus', + 'Indian elephant, Elephas maximus', + 'African elephant, Loxodonta africana', + 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens', + 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca', + 'barracouta, snoek', + 'eel', + 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch', # noqa: E501 + 'rock beauty, Holocanthus tricolor', + 'anemone fish', + 'sturgeon', + 'gar, garfish, garpike, billfish, Lepisosteus osseus', + 'lionfish', + 'puffer, pufferfish, blowfish, globefish', + 'abacus', + 'abaya', + "academic gown, academic robe, judge's robe", + 'accordion, piano accordion, squeeze box', + 'acoustic guitar', + 'aircraft carrier, carrier, flattop, attack aircraft carrier', + 'airliner', + 'airship, dirigible', + 'altar', + 'ambulance', + 'amphibian, amphibious vehicle', + 'analog clock', + 'apiary, bee house', + 'apron', + 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin', # noqa: E501 + 'assault rifle, assault gun', + 'backpack, back pack, 
knapsack, packsack, rucksack, haversack', + 'bakery, bakeshop, bakehouse', + 'balance beam, beam', + 'balloon', + 'ballpoint, ballpoint pen, ballpen, Biro', + 'Band Aid', + 'banjo', + 'bannister, banister, balustrade, balusters, handrail', + 'barbell', + 'barber chair', + 'barbershop', + 'barn', + 'barometer', + 'barrel, cask', + 'barrow, garden cart, lawn cart, wheelbarrow', + 'baseball', + 'basketball', + 'bassinet', + 'bassoon', + 'bathing cap, swimming cap', + 'bath towel', + 'bathtub, bathing tub, bath, tub', + 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', # noqa: E501 + 'beacon, lighthouse, beacon light, pharos', + 'beaker', + 'bearskin, busby, shako', + 'beer bottle', + 'beer glass', + 'bell cote, bell cot', + 'bib', + 'bicycle-built-for-two, tandem bicycle, tandem', + 'bikini, two-piece', + 'binder, ring-binder', + 'binoculars, field glasses, opera glasses', + 'birdhouse', + 'boathouse', + 'bobsled, bobsleigh, bob', + 'bolo tie, bolo, bola tie, bola', + 'bonnet, poke bonnet', + 'bookcase', + 'bookshop, bookstore, bookstall', + 'bottlecap', + 'bow', + 'bow tie, bow-tie, bowtie', + 'brass, memorial tablet, plaque', + 'brassiere, bra, bandeau', + 'breakwater, groin, groyne, mole, bulwark, seawall, jetty', + 'breastplate, aegis, egis', + 'broom', + 'bucket, pail', + 'buckle', + 'bulletproof vest', + 'bullet train, bullet', + 'butcher shop, meat market', + 'cab, hack, taxi, taxicab', + 'caldron, cauldron', + 'candle, taper, wax light', + 'cannon', + 'canoe', + 'can opener, tin opener', + 'cardigan', + 'car mirror', + 'carousel, carrousel, merry-go-round, roundabout, whirligig', + "carpenter's kit, tool kit", + 'carton', + 'car wheel', + 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM', # noqa: E501 + 'cassette', + 'cassette player', + 'castle', + 'catamaran', + 'CD player', + 'cello, violoncello', + 'cellular telephone, cellular phone, cellphone, cell, mobile phone', + 'chain', + 'chainlink fence', + 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour', # noqa: E501 + 'chain saw, chainsaw', + 'chest', + 'chiffonier, commode', + 'chime, bell, gong', + 'china cabinet, china closet', + 'Christmas stocking', + 'church, church building', + 'cinema, movie theater, movie theatre, movie house, picture palace', + 'cleaver, meat cleaver, chopper', + 'cliff dwelling', + 'cloak', + 'clog, geta, patten, sabot', + 'cocktail shaker', + 'coffee mug', + 'coffeepot', + 'coil, spiral, volute, whorl, helix', + 'combination lock', + 'computer keyboard, keypad', + 'confectionery, confectionary, candy store', + 'container ship, containership, container vessel', + 'convertible', + 'corkscrew, bottle screw', + 'cornet, horn, trumpet, trump', + 'cowboy boot', + 'cowboy hat, ten-gallon hat', + 'cradle', + 'crane', + 'crash helmet', + 'crate', + 'crib, cot', + 'Crock Pot', + 'croquet ball', + 'crutch', + 'cuirass', + 'dam, dike, dyke', + 'desk', + 'desktop computer', + 'dial telephone, dial phone', + 'diaper, nappy, napkin', + 'digital clock', + 'digital watch', + 'dining table, board', + 'dishrag, dishcloth', + 'dishwasher, dish washer, dishwashing machine', + 'disk brake, disc brake', + 'dock, dockage, docking facility', + 'dogsled, dog sled, dog sleigh', + 'dome', + 'doormat, welcome mat', + 'drilling platform, offshore rig', + 'drum, membranophone, tympan', + 'drumstick', + 'dumbbell', + 'Dutch oven', + 'electric fan, blower', + 'electric guitar', + 'electric 
locomotive', + 'entertainment center', + 'envelope', + 'espresso maker', + 'face powder', + 'feather boa, boa', + 'file, file cabinet, filing cabinet', + 'fireboat', + 'fire engine, fire truck', + 'fire screen, fireguard', + 'flagpole, flagstaff', + 'flute, transverse flute', + 'folding chair', + 'football helmet', + 'forklift', + 'fountain', + 'fountain pen', + 'four-poster', + 'freight car', + 'French horn, horn', + 'frying pan, frypan, skillet', + 'fur coat', + 'garbage truck, dustcart', + 'gasmask, respirator, gas helmet', + 'gas pump, gasoline pump, petrol pump, island dispenser', + 'goblet', + 'go-kart', + 'golf ball', + 'golfcart, golf cart', + 'gondola', + 'gong, tam-tam', + 'gown', + 'grand piano, grand', + 'greenhouse, nursery, glasshouse', + 'grille, radiator grille', + 'grocery store, grocery, food market, market', + 'guillotine', + 'hair slide', + 'hair spray', + 'half track', + 'hammer', + 'hamper', + 'hand blower, blow dryer, blow drier, hair dryer, hair drier', + 'hand-held computer, hand-held microcomputer', + 'handkerchief, hankie, hanky, hankey', + 'hard disc, hard disk, fixed disk', + 'harmonica, mouth organ, harp, mouth harp', + 'harp', + 'harvester, reaper', + 'hatchet', + 'holster', + 'home theater, home theatre', + 'honeycomb', + 'hook, claw', + 'hoopskirt, crinoline', + 'horizontal bar, high bar', + 'horse cart, horse-cart', + 'hourglass', + 'iPod', + 'iron, smoothing iron', + "jack-o'-lantern", + 'jean, blue jean, denim', + 'jeep, landrover', + 'jersey, T-shirt, tee shirt', + 'jigsaw puzzle', + 'jinrikisha, ricksha, rickshaw', + 'joystick', + 'kimono', + 'knee pad', + 'knot', + 'lab coat, laboratory coat', + 'ladle', + 'lampshade, lamp shade', + 'laptop, laptop computer', + 'lawn mower, mower', + 'lens cap, lens cover', + 'letter opener, paper knife, paperknife', + 'library', + 'lifeboat', + 'lighter, light, igniter, ignitor', + 'limousine, limo', + 'liner, ocean liner', + 'lipstick, lip rouge', + 'Loafer', + 'lotion', + 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system', # noqa: E501 + "loupe, jeweler's loupe", + 'lumbermill, sawmill', + 'magnetic compass', + 'mailbag, postbag', + 'mailbox, letter box', + 'maillot', + 'maillot, tank suit', + 'manhole cover', + 'maraca', + 'marimba, xylophone', + 'mask', + 'matchstick', + 'maypole', + 'maze, labyrinth', + 'measuring cup', + 'medicine chest, medicine cabinet', + 'megalith, megalithic structure', + 'microphone, mike', + 'microwave, microwave oven', + 'military uniform', + 'milk can', + 'minibus', + 'miniskirt, mini', + 'minivan', + 'missile', + 'mitten', + 'mixing bowl', + 'mobile home, manufactured home', + 'Model T', + 'modem', + 'monastery', + 'monitor', + 'moped', + 'mortar', + 'mortarboard', + 'mosque', + 'mosquito net', + 'motor scooter, scooter', + 'mountain bike, all-terrain bike, off-roader', + 'mountain tent', + 'mouse, computer mouse', + 'mousetrap', + 'moving van', + 'muzzle', + 'nail', + 'neck brace', + 'necklace', + 'nipple', + 'notebook, notebook computer', + 'obelisk', + 'oboe, hautboy, hautbois', + 'ocarina, sweet potato', + 'odometer, hodometer, mileometer, milometer', + 'oil filter', + 'organ, pipe organ', + 'oscilloscope, scope, cathode-ray oscilloscope, CRO', + 'overskirt', + 'oxcart', + 'oxygen mask', + 'packet', + 'paddle, boat paddle', + 'paddlewheel, paddle wheel', + 'padlock', + 'paintbrush', + "pajama, pyjama, pj's, jammies", + 'palace', + 'panpipe, pandean pipe, syrinx', + 'paper towel', + 'parachute, chute', + 'parallel bars, bars', + 'park bench', + 'parking 
meter', + 'passenger car, coach, carriage', + 'patio, terrace', + 'pay-phone, pay-station', + 'pedestal, plinth, footstall', + 'pencil box, pencil case', + 'pencil sharpener', + 'perfume, essence', + 'Petri dish', + 'photocopier', + 'pick, plectrum, plectron', + 'pickelhaube', + 'picket fence, paling', + 'pickup, pickup truck', + 'pier', + 'piggy bank, penny bank', + 'pill bottle', + 'pillow', + 'ping-pong ball', + 'pinwheel', + 'pirate, pirate ship', + 'pitcher, ewer', + "plane, carpenter's plane, woodworking plane", + 'planetarium', + 'plastic bag', + 'plate rack', + 'plow, plough', + "plunger, plumber's helper", + 'Polaroid camera, Polaroid Land camera', + 'pole', + 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria', # noqa: E501 + 'poncho', + 'pool table, billiard table, snooker table', + 'pop bottle, soda bottle', + 'pot, flowerpot', + "potter's wheel", + 'power drill', + 'prayer rug, prayer mat', + 'printer', + 'prison, prison house', + 'projectile, missile', + 'projector', + 'puck, hockey puck', + 'punching bag, punch bag, punching ball, punchball', + 'purse', + 'quill, quill pen', + 'quilt, comforter, comfort, puff', + 'racer, race car, racing car', + 'racket, racquet', + 'radiator', + 'radio, wireless', + 'radio telescope, radio reflector', + 'rain barrel', + 'recreational vehicle, RV, R.V.', + 'reel', + 'reflex camera', + 'refrigerator, icebox', + 'remote control, remote', + 'restaurant, eating house, eating place, eatery', + 'revolver, six-gun, six-shooter', + 'rifle', + 'rocking chair, rocker', + 'rotisserie', + 'rubber eraser, rubber, pencil eraser', + 'rugby ball', + 'rule, ruler', + 'running shoe', + 'safe', + 'safety pin', + 'saltshaker, salt shaker', + 'sandal', + 'sarong', + 'sax, saxophone', + 'scabbard', + 'scale, weighing machine', + 'school bus', + 'schooner', + 'scoreboard', + 'screen, CRT screen', + 'screw', + 'screwdriver', + 'seat belt, seatbelt', + 'sewing machine', + 'shield, buckler', + 'shoe shop, shoe-shop, shoe store', + 'shoji', + 'shopping basket', + 'shopping cart', + 'shovel', + 'shower cap', + 'shower curtain', + 'ski', + 'ski mask', + 'sleeping bag', + 'slide rule, slipstick', + 'sliding door', + 'slot, one-armed bandit', + 'snorkel', + 'snowmobile', + 'snowplow, snowplough', + 'soap dispenser', + 'soccer ball', + 'sock', + 'solar dish, solar collector, solar furnace', + 'sombrero', + 'soup bowl', + 'space bar', + 'space heater', + 'space shuttle', + 'spatula', + 'speedboat', + "spider web, spider's web", + 'spindle', + 'sports car, sport car', + 'spotlight, spot', + 'stage', + 'steam locomotive', + 'steel arch bridge', + 'steel drum', + 'stethoscope', + 'stole', + 'stone wall', + 'stopwatch, stop watch', + 'stove', + 'strainer', + 'streetcar, tram, tramcar, trolley, trolley car', + 'stretcher', + 'studio couch, day bed', + 'stupa, tope', + 'submarine, pigboat, sub, U-boat', + 'suit, suit of clothes', + 'sundial', + 'sunglass', + 'sunglasses, dark glasses, shades', + 'sunscreen, sunblock, sun blocker', + 'suspension bridge', + 'swab, swob, mop', + 'sweatshirt', + 'swimming trunks, bathing trunks', + 'swing', + 'switch, electric switch, electrical switch', + 'syringe', + 'table lamp', + 'tank, army tank, armored combat vehicle, armoured combat vehicle', + 'tape player', + 'teapot', + 'teddy, teddy bear', + 'television, television system', + 'tennis ball', + 'thatch, thatched roof', + 'theater curtain, theatre curtain', + 'thimble', + 'thresher, thrasher, threshing machine', + 'throne', + 'tile roof', + 'toaster', + 'tobacco shop, 
tobacconist shop, tobacconist', + 'toilet seat', + 'torch', + 'totem pole', + 'tow truck, tow car, wrecker', + 'toyshop', + 'tractor', + 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi', # noqa: E501 + 'tray', + 'trench coat', + 'tricycle, trike, velocipede', + 'trimaran', + 'tripod', + 'triumphal arch', + 'trolleybus, trolley coach, trackless trolley', + 'trombone', + 'tub, vat', + 'turnstile', + 'typewriter keyboard', + 'umbrella', + 'unicycle, monocycle', + 'upright, upright piano', + 'vacuum, vacuum cleaner', + 'vase', + 'vault', + 'velvet', + 'vending machine', + 'vestment', + 'viaduct', + 'violin, fiddle', + 'volleyball', + 'waffle iron', + 'wall clock', + 'wallet, billfold, notecase, pocketbook', + 'wardrobe, closet, press', + 'warplane, military plane', + 'washbasin, handbasin, washbowl, lavabo, wash-hand basin', + 'washer, automatic washer, washing machine', + 'water bottle', + 'water jug', + 'water tower', + 'whiskey jug', + 'whistle', + 'wig', + 'window screen', + 'window shade', + 'Windsor tie', + 'wine bottle', + 'wing', + 'wok', + 'wooden spoon', + 'wool, woolen, woollen', + 'worm fence, snake fence, snake-rail fence, Virginia fence', + 'wreck', + 'yawl', + 'yurt', + 'web site, website, internet site, site', + 'comic book', + 'crossword puzzle, crossword', + 'street sign', + 'traffic light, traffic signal, stoplight', + 'book jacket, dust cover, dust jacket, dust wrapper', + 'menu', + 'plate', + 'guacamole', + 'consomme', + 'hot pot, hotpot', + 'trifle', + 'ice cream, icecream', + 'ice lolly, lolly, lollipop, popsicle', + 'French loaf', + 'bagel, beigel', + 'pretzel', + 'cheeseburger', + 'hotdog, hot dog, red hot', + 'mashed potato', + 'head cabbage', + 'broccoli', + 'cauliflower', + 'zucchini, courgette', + 'spaghetti squash', + 'acorn squash', + 'butternut squash', + 'cucumber, cuke', + 'artichoke, globe artichoke', + 'bell pepper', + 'cardoon', + 'mushroom', + 'Granny Smith', + 'strawberry', + 'orange', + 'lemon', + 'fig', + 'pineapple, ananas', + 'banana', + 'jackfruit, jak, jack', + 'custard apple', + 'pomegranate', + 'hay', + 'carbonara', + 'chocolate sauce, chocolate syrup', + 'dough', + 'meat loaf, meatloaf', + 'pizza, pizza pie', + 'potpie', + 'burrito', + 'red wine', + 'espresso', + 'cup', + 'eggnog', + 'alp', + 'bubble', + 'cliff, drop, drop-off', + 'coral reef', + 'geyser', + 'lakeside, lakeshore', + 'promontory, headland, head, foreland', + 'sandbar, sand bar', + 'seashore, coast, seacoast, sea-coast', + 'valley, vale', + 'volcano', + 'ballplayer, baseball player', + 'groom, bridegroom', + 'scuba diver', + 'rapeseed', + 'daisy', + "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", # noqa: E501 + 'corn', + 'acorn', + 'hip, rose hip, rosehip', + 'buckeye, horse chestnut, conker', + 'coral fungus', + 'agaric', + 'gyromitra', + 'stinkhorn, carrion fungus', + 'earthstar', + 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa', # noqa: E501 + 'bolete', + 'ear, spike, capitulum', + 'toilet tissue, toilet paper, bathroom tissue' + ] + + def load_annotations(self): + if self.ann_file is None: + folder_to_idx = find_folders(self.data_prefix) + samples = get_samples( + self.data_prefix, + folder_to_idx, + extensions=self.IMG_EXTENSIONS) + if len(samples) == 0: + raise (RuntimeError('Found 0 files in subfolders of: ' + f'{self.data_prefix}. 
' + 'Supported extensions are: ' + f'{",".join(self.IMG_EXTENSIONS)}')) + + self.folder_to_idx = folder_to_idx + elif isinstance(self.ann_file, str): + with open(self.ann_file) as f: + samples = [x.strip().rsplit(' ', 1) for x in f.readlines()] + else: + raise TypeError('ann_file must be a str or None') + self.samples = samples + + data_infos = [] + for filename, gt_label in self.samples: + info = {'img_prefix': self.data_prefix} + info['img_info'] = {'filename': filename} + info['gt_label'] = np.array(gt_label, dtype=np.int64) + data_infos.append(info) + return data_infos diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet21k.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet21k.py new file mode 100644 index 0000000000..6f629f60f0 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/imagenet21k.py @@ -0,0 +1,141 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import warnings + +import numpy as np +from mmcv.utils import scandir + +from .base_dataset import BaseDataset +from .builder import DATASETS +from .imagenet import find_folders + + +class ImageInfo(): + """Class to store image info; using __slots__ saves memory compared + with a dict.""" + __slots__ = ['path', 'gt_label'] + + def __init__(self, path, gt_label): + self.path = path + self.gt_label = gt_label + + +@DATASETS.register_module() +class ImageNet21k(BaseDataset): + """ImageNet21k Dataset. + + The ImageNet21k dataset is extremely large, containing 21k+ classes + and over 14 million images. Compared with the `ImageNet` class, it + makes the following changes to reduce memory usage and loading time: + + - The samples attribute is removed. + - __slots__-based ImageInfo objects are used instead of dicts. + - The info dict is built in `prepare_data` instead of + `load_annotations`. + - Labels are stored as plain int instead of np.array(..., np.int64). + + Args: + data_prefix (str): the prefix of the data path + pipeline (list): a list of dicts, where each element represents + an operation defined in `mmcls.datasets.pipelines` + ann_file (str | None): the annotation file. When ann_file is a str, + samples are read from it. When ann_file is None, samples are + collected by scanning data_prefix + test_mode (bool): whether the dataset is used in test mode + multi_label (bool): whether to use multi-label annotations. + recursion_subdir (bool): whether to also collect images from + sub-directories of each category directory, as long as they + match IMG_EXTENSIONS.
+ """ + + IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', + '.JPEG', '.JPG') + CLASSES = None + + def __init__(self, + data_prefix, + pipeline, + classes=None, + ann_file=None, + multi_label=False, + recursion_subdir=False, + test_mode=False): + self.recursion_subdir = recursion_subdir + if multi_label: + raise NotImplementedError('Multi_label have not be implemented.') + self.multi_lable = multi_label + super(ImageNet21k, self).__init__(data_prefix, pipeline, classes, + ann_file, test_mode) + + def prepare_data(self, idx): + info = self.data_infos[idx] + results = { + 'img_prefix': self.data_prefix, + 'img_info': dict(filename=info.path), + 'gt_label': np.array(info.gt_label, dtype=np.int64) + } + return self.pipeline(results) + + def load_annotations(self): + """load dataset annotations.""" + if self.ann_file is None: + data_infos = self._load_annotations_from_dir() + elif isinstance(self.ann_file, str): + data_infos = self._load_annotations_from_file() + else: + raise TypeError('ann_file must be a str or None') + + if len(data_infos) == 0: + msg = 'Found no valid file in ' + msg += f'{self.ann_file}. ' if self.ann_file \ + else f'{self.data_prefix}. ' + msg += 'Supported extensions are: ' + \ + ', '.join(self.IMG_EXTENSIONS) + raise RuntimeError(msg) + + return data_infos + + def _find_allowed_files(self, root, folder_name): + """find all the allowed files in a folder, including sub folder if + recursion_subdir is true.""" + _dir = os.path.join(root, folder_name) + infos_pre_class = [] + for path in scandir(_dir, self.IMG_EXTENSIONS, self.recursion_subdir): + path = os.path.join(folder_name, path) + item = ImageInfo(path, self.folder_to_idx[folder_name]) + infos_pre_class.append(item) + return infos_pre_class + + def _load_annotations_from_dir(self): + """load annotations from self.data_prefix directory.""" + data_infos, empty_classes = [], [] + folder_to_idx = find_folders(self.data_prefix) + self.folder_to_idx = folder_to_idx + root = os.path.expanduser(self.data_prefix) + for folder_name in folder_to_idx.keys(): + infos_pre_class = self._find_allowed_files(root, folder_name) + if len(infos_pre_class) == 0: + empty_classes.append(folder_name) + data_infos.extend(infos_pre_class) + + if len(empty_classes) != 0: + msg = 'Found no valid file for the classes ' + \ + f"{', '.join(sorted(empty_classes))} " + msg += 'Supported extensions are: ' + \ + f"{', '.join(self.IMG_EXTENSIONS)}." + warnings.warn(msg) + + return data_infos + + def _load_annotations_from_file(self): + """load annotations from self.ann_file.""" + data_infos = [] + with open(self.ann_file) as f: + for line in f.readlines(): + if line == '': + continue + filepath, gt_label = line.strip().rsplit(' ', 1) + info = ImageInfo(filepath, int(gt_label)) + data_infos.append(info) + + return data_infos diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/mnist.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/mnist.py new file mode 100644 index 0000000000..4065e0d542 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/mnist.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import codecs +import os +import os.path as osp + +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import get_dist_info, master_only + +from .base_dataset import BaseDataset +from .builder import DATASETS +from .utils import download_and_extract_archive, rm_suffix + + +@DATASETS.register_module() +class MNIST(BaseDataset): + """`MNIST `_ Dataset. + + This implementation is modified from + https://github.com/pytorch/vision/blob/master/torchvision/datasets/mnist.py + """ # noqa: E501 + + resource_prefix = 'http://yann.lecun.com/exdb/mnist/' + resources = { + 'train_image_file': + ('train-images-idx3-ubyte.gz', 'f68b3c2dcbeaaa9fbdd348bbdeb94873'), + 'train_label_file': + ('train-labels-idx1-ubyte.gz', 'd53e105ee54ea40749a09fcbcd1e9432'), + 'test_image_file': + ('t10k-images-idx3-ubyte.gz', '9fb629c4189551a2d022fa330f9573f3'), + 'test_label_file': + ('t10k-labels-idx1-ubyte.gz', 'ec29112dd5afa0611ce80d1b7f02629c') + } + + CLASSES = [ + '0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', '5 - five', + '6 - six', '7 - seven', '8 - eight', '9 - nine' + ] + + def load_annotations(self): + train_image_file = osp.join( + self.data_prefix, rm_suffix(self.resources['train_image_file'][0])) + train_label_file = osp.join( + self.data_prefix, rm_suffix(self.resources['train_label_file'][0])) + test_image_file = osp.join( + self.data_prefix, rm_suffix(self.resources['test_image_file'][0])) + test_label_file = osp.join( + self.data_prefix, rm_suffix(self.resources['test_label_file'][0])) + + if not osp.exists(train_image_file) or not osp.exists( + train_label_file) or not osp.exists( + test_image_file) or not osp.exists(test_label_file): + self.download() + + _, world_size = get_dist_info() + if world_size > 1: + dist.barrier() + assert osp.exists(train_image_file) and osp.exists( + train_label_file) and osp.exists( + test_image_file) and osp.exists(test_label_file), \ + 'Shared storage seems unavailable. Please download dataset ' \ + f'manually through {self.resource_prefix}.' 
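A hedged sketch of how the MNIST dataset above is typically declared; the path and the pipeline name are placeholders. If the four files listed in resources are missing, rank 0 downloads them into data_prefix via the master_only download() method further below, while the other ranks wait at the barrier above.

# Hypothetical config snippet; 'data/mnist' and mnist_pipeline are placeholders.
train = dict(
    type='MNIST',
    data_prefix='data/mnist',   # the IDX files are downloaded here if absent
    pipeline=mnist_pipeline,    # assumed to be defined elsewhere in the config
    test_mode=False)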
+ + train_set = (read_image_file(train_image_file), + read_label_file(train_label_file)) + test_set = (read_image_file(test_image_file), + read_label_file(test_label_file)) + + if not self.test_mode: + imgs, gt_labels = train_set + else: + imgs, gt_labels = test_set + + data_infos = [] + for img, gt_label in zip(imgs, gt_labels): + gt_label = np.array(gt_label, dtype=np.int64) + info = {'img': img.numpy(), 'gt_label': gt_label} + data_infos.append(info) + return data_infos + + @master_only + def download(self): + os.makedirs(self.data_prefix, exist_ok=True) + + # download files + for url, md5 in self.resources.values(): + url = osp.join(self.resource_prefix, url) + filename = url.rpartition('/')[2] + download_and_extract_archive( + url, + download_root=self.data_prefix, + filename=filename, + md5=md5) + + +@DATASETS.register_module() +class FashionMNIST(MNIST): + """`Fashion-MNIST `_ + Dataset.""" + + resource_prefix = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/' # noqa: E501 + resources = { + 'train_image_file': + ('train-images-idx3-ubyte.gz', '8d4fb7e6c68d591d4c3dfef9ec88bf0d'), + 'train_label_file': + ('train-labels-idx1-ubyte.gz', '25c81989df183df01b3e8a0aad5dffbe'), + 'test_image_file': + ('t10k-images-idx3-ubyte.gz', 'bef4ecab320f06d8554ea6380940ec79'), + 'test_label_file': + ('t10k-labels-idx1-ubyte.gz', 'bb300cfdad3c16e7a12a480ee83cd310') + } + CLASSES = [ + 'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', + 'Shirt', 'Sneaker', 'Bag', 'Ankle boot' + ] + + +def get_int(b): + return int(codecs.encode(b, 'hex'), 16) + + +def open_maybe_compressed_file(path): + """Return a file object that possibly decompresses 'path' on the fly. + + Decompression occurs when argument `path` is a string and ends with '.gz' + or '.xz'. + """ + if not isinstance(path, str): + return path + if path.endswith('.gz'): + import gzip + return gzip.open(path, 'rb') + if path.endswith('.xz'): + import lzma + return lzma.open(path, 'rb') + return open(path, 'rb') + + +def read_sn3_pascalvincent_tensor(path, strict=True): + """Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx- + io.lsh'). + + Argument may be a filename, compressed filename, or file object. 
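A worked example of the IDX header that the reader below decodes; the arithmetic follows directly from the magic % 256 / magic // 256 split and the typemap used in the code.

# MNIST image files start with the big-endian bytes 00 00 08 03:
#   magic = 0x00000803 = 2051
#   nd = 2051 % 256  -> 3   (three dimensions: num_images, rows, cols)
#   ty = 2051 // 256 -> 8   (typemap entry 8 -> torch.uint8 / np.uint8)
# Label files start with 0x00000801 = 2049, i.e. nd=1, ty=8 (a 1-D uint8 vector).
assert 2051 % 256 == 3 and 2051 // 256 == 8
assert 2049 % 256 == 1 and 2049 // 256 == 8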
+ """ + # typemap + if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'): + read_sn3_pascalvincent_tensor.typemap = { + 8: (torch.uint8, np.uint8, np.uint8), + 9: (torch.int8, np.int8, np.int8), + 11: (torch.int16, np.dtype('>i2'), 'i2'), + 12: (torch.int32, np.dtype('>i4'), 'i4'), + 13: (torch.float32, np.dtype('>f4'), 'f4'), + 14: (torch.float64, np.dtype('>f8'), 'f8') + } + # read + with open_maybe_compressed_file(path) as f: + data = f.read() + # parse + magic = get_int(data[0:4]) + nd = magic % 256 + ty = magic // 256 + assert nd >= 1 and nd <= 3 + assert ty >= 8 and ty <= 14 + m = read_sn3_pascalvincent_tensor.typemap[ty] + s = [get_int(data[4 * (i + 1):4 * (i + 2)]) for i in range(nd)] + parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1))) + assert parsed.shape[0] == np.prod(s) or not strict + return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s) + + +def read_label_file(path): + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert (x.dtype == torch.uint8) + assert (x.ndimension() == 1) + return x.long() + + +def read_image_file(path): + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert (x.dtype == torch.uint8) + assert (x.ndimension() == 3) + return x diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/multi_label.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/multi_label.py new file mode 100644 index 0000000000..a52bb88e00 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/multi_label.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import numpy as np + +from mmcls.core import average_performance, mAP +from .base_dataset import BaseDataset + + +class MultiLabelDataset(BaseDataset): + """Multi-label Dataset.""" + + def get_cat_ids(self, idx): + """Get category ids by index. + + Args: + idx (int): Index of data. + + Returns: + np.ndarray: Image categories of specified index. + """ + gt_labels = self.data_infos[idx]['gt_label'] + cat_ids = np.where(gt_labels == 1)[0] + return cat_ids + + def evaluate(self, + results, + metric='mAP', + metric_options=None, + logger=None, + **deprecated_kwargs): + """Evaluate the dataset. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + Default value is 'mAP'. Options are 'mAP', 'CP', 'CR', 'CF1', + 'OP', 'OR' and 'OF1'. + metric_options (dict, optional): Options for calculating metrics. + Allowed keys are 'k' and 'thr'. Defaults to None + logger (logging.Logger | str, optional): Logger used for printing + related information during evaluation. Defaults to None. + deprecated_kwargs (dict): Used for containing deprecated arguments. + + Returns: + dict: evaluation results + """ + if metric_options is None: + metric_options = {'thr': 0.5} + + if deprecated_kwargs != {}: + warnings.warn('Option arguments for metrics has been changed to ' + '`metric_options`.') + metric_options = {**deprecated_kwargs} + + if isinstance(metric, str): + metrics = [metric] + else: + metrics = metric + allowed_metrics = ['mAP', 'CP', 'CR', 'CF1', 'OP', 'OR', 'OF1'] + eval_results = {} + results = np.vstack(results) + gt_labels = self.get_gt_labels() + num_imgs = len(results) + assert len(gt_labels) == num_imgs, 'dataset testing results should '\ + 'be of the same length as gt_labels.' 
+ + invalid_metrics = set(metrics) - set(allowed_metrics) + if len(invalid_metrics) != 0: + raise ValueError(f'metric {invalid_metrics} is not supported.') + + if 'mAP' in metrics: + mAP_value = mAP(results, gt_labels) + eval_results['mAP'] = mAP_value + if len(set(metrics) - {'mAP'}) != 0: + performance_keys = ['CP', 'CR', 'CF1', 'OP', 'OR', 'OF1'] + performance_values = average_performance(results, gt_labels, + **metric_options) + for k, v in zip(performance_keys, performance_values): + if k in metrics: + eval_results[k] = v + + return eval_results diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/__init__.py new file mode 100644 index 0000000000..d6b86f23bf --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .auto_augment import (AutoAugment, AutoContrast, Brightness, + ColorTransform, Contrast, Cutout, Equalize, Invert, + Posterize, RandAugment, Rotate, Sharpness, Shear, + Solarize, SolarizeAdd, Translate) +from .compose import Compose +from .formatting import (Collect, ImageToTensor, ToNumpy, ToPIL, ToTensor, + Transpose, to_tensor) +from .loading import LoadImageFromFile +from .transforms import (CenterCrop, ColorJitter, Lighting, Normalize, + RandomCrop, RandomErasing, RandomFlip, + RandomGrayscale, RandomResizedCrop, Resize) + +__all__ = [ + 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToPIL', 'ToNumpy', + 'Transpose', 'Collect', 'LoadImageFromFile', 'Resize', 'CenterCrop', + 'RandomFlip', 'Normalize', 'RandomCrop', 'RandomResizedCrop', + 'RandomGrayscale', 'Shear', 'Translate', 'Rotate', 'Invert', + 'ColorTransform', 'Solarize', 'Posterize', 'AutoContrast', 'Equalize', + 'Contrast', 'Brightness', 'Sharpness', 'AutoAugment', 'SolarizeAdd', + 'Cutout', 'RandAugment', 'Lighting', 'ColorJitter', 'RandomErasing' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/auto_augment.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/auto_augment.py new file mode 100644 index 0000000000..973c1bd250 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/auto_augment.py @@ -0,0 +1,921 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import inspect +import random +from math import ceil +from numbers import Number +from typing import Sequence + +import mmcv +import numpy as np + +from ..builder import PIPELINES +from .compose import Compose + +# Default hyperparameters for all Ops +_HPARAMS_DEFAULT = dict(pad_val=128) + + +def random_negative(value, random_negative_prob): + """Randomly negate value based on random_negative_prob.""" + return -value if np.random.rand() < random_negative_prob else value + + +def merge_hparams(policy: dict, hparams: dict): + """Merge hyperparameters into policy config. + + Only merge partial hyperparameters required of the policy. + + Args: + policy (dict): Original policy config dict. + hparams (dict): Hyperparameters need to be merged. + + Returns: + dict: Policy config dict after adding ``hparams``. + """ + op = PIPELINES.get(policy['type']) + assert op is not None, f'Invalid policy type "{policy["type"]}".' 
+ for key, value in hparams.items(): + if policy.get(key, None) is not None: + continue + if key in inspect.getfullargspec(op.__init__).args: + policy[key] = value + return policy + + +@PIPELINES.register_module() +class AutoAugment(object): + """Auto augmentation. + + This data augmentation is proposed in `AutoAugment: Learning Augmentation + Policies from Data `_. + + Args: + policies (list[list[dict]]): The policies of auto augmentation. Each + policy in ``policies`` is a specific augmentation policy, and is + composed by several augmentations (dict). When AutoAugment is + called, a random policy in ``policies`` will be selected to + augment images. + hparams (dict): Configs of hyperparameters. Hyperparameters will be + used in policies that require these arguments if these arguments + are not set in policy dicts. Defaults to use _HPARAMS_DEFAULT. + """ + + def __init__(self, policies, hparams=_HPARAMS_DEFAULT): + assert isinstance(policies, list) and len(policies) > 0, \ + 'Policies must be a non-empty list.' + for policy in policies: + assert isinstance(policy, list) and len(policy) > 0, \ + 'Each policy in policies must be a non-empty list.' + for augment in policy: + assert isinstance(augment, dict) and 'type' in augment, \ + 'Each specific augmentation must be a dict with key' \ + ' "type".' + + self.hparams = hparams + policies = copy.deepcopy(policies) + self.policies = [] + for sub in policies: + merged_sub = [merge_hparams(policy, hparams) for policy in sub] + self.policies.append(merged_sub) + + self.sub_policy = [Compose(policy) for policy in self.policies] + + def __call__(self, results): + sub_policy = random.choice(self.sub_policy) + return sub_policy(results) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(policies={self.policies})' + return repr_str + + +@PIPELINES.register_module() +class RandAugment(object): + r"""Random augmentation. + + This data augmentation is proposed in `RandAugment: Practical automated + data augmentation with a reduced search space + `_. + + Args: + policies (list[dict]): The policies of random augmentation. Each + policy in ``policies`` is one specific augmentation policy (dict). + The policy shall at least have key `type`, indicating the type of + augmentation. For those which have magnitude, (given to the fact + they are named differently in different augmentation, ) + `magnitude_key` and `magnitude_range` shall be the magnitude + argument (str) and the range of magnitude (tuple in the format of + (val1, val2)), respectively. Note that val1 is not necessarily + less than val2. + num_policies (int): Number of policies to select from policies each + time. + magnitude_level (int | float): Magnitude level for all the augmentation + selected. + total_level (int | float): Total level for the magnitude. Defaults to + 30. + magnitude_std (Number | str): Deviation of magnitude noise applied. + + - If positive number, magnitude is sampled from normal distribution + (mean=magnitude, std=magnitude_std). + - If 0 or negative number, magnitude remains unchanged. + - If str "inf", magnitude is sampled from uniform distribution + (range=[min, magnitude]). + hparams (dict): Configs of hyperparameters. Hyperparameters will be + used in policies that require these arguments if these arguments + are not set in policy dicts. Defaults to use _HPARAMS_DEFAULT. + + Note: + `magnitude_std` will introduce some randomness to policy, modified by + https://github.com/rwightman/pytorch-image-models. 
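A hedged sketch of the policies structure that the AutoAugment transform above expects: a list of sub-policies, each itself a list of transform dicts built from the ops defined later in this file. The magnitudes and probabilities here are illustrative examples, not the ImageNet policy shipped with this patch.

# Illustrative AutoAugment sub-policies (values are examples only).
example_policies = [
    [dict(type='Posterize', bits=4, prob=0.4),
     dict(type='Rotate', angle=30., prob=0.6)],   # Rotate expects a float angle
    [dict(type='Solarize', thr=128, prob=0.6),
     dict(type='Equalize', prob=0.8)],
]
auto_augment = dict(type='AutoAugment', policies=example_policies)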
+ + When magnitude_std=0, we calculate the magnitude as follows: + + .. math:: + \text{magnitude} = \frac{\text{magnitude\_level}} + {\text{total\_level}} \times (\text{val2} - \text{val1}) + + \text{val1} + """ + + def __init__(self, + policies, + num_policies, + magnitude_level, + magnitude_std=0., + total_level=30, + hparams=_HPARAMS_DEFAULT): + assert isinstance(num_policies, int), 'Number of policies must be ' \ + f'of int type, got {type(num_policies)} instead.' + assert isinstance(magnitude_level, (int, float)), \ + 'Magnitude level must be of int or float type, ' \ + f'got {type(magnitude_level)} instead.' + assert isinstance(total_level, (int, float)), 'Total level must be ' \ + f'of int or float type, got {type(total_level)} instead.' + assert isinstance(policies, list) and len(policies) > 0, \ + 'Policies must be a non-empty list.' + + assert isinstance(magnitude_std, (Number, str)), \ + 'Magnitude std must be of number or str type, ' \ + f'got {type(magnitude_std)} instead.' + if isinstance(magnitude_std, str): + assert magnitude_std == 'inf', \ + 'Magnitude std must be of number or "inf", ' \ + f'got "{magnitude_std}" instead.' + + assert num_policies > 0, 'num_policies must be greater than 0.' + assert magnitude_level >= 0, 'magnitude_level must be no less than 0.' + assert total_level > 0, 'total_level must be greater than 0.' + + self.num_policies = num_policies + self.magnitude_level = magnitude_level + self.magnitude_std = magnitude_std + self.total_level = total_level + self.hparams = hparams + policies = copy.deepcopy(policies) + self._check_policies(policies) + self.policies = [merge_hparams(policy, hparams) for policy in policies] + + def _check_policies(self, policies): + for policy in policies: + assert isinstance(policy, dict) and 'type' in policy, \ + 'Each policy must be a dict with key "type".' + type_name = policy['type'] + + magnitude_key = policy.get('magnitude_key', None) + if magnitude_key is not None: + assert 'magnitude_range' in policy, \ + f'RandAugment policy {type_name} needs `magnitude_range`.' + magnitude_range = policy['magnitude_range'] + assert (isinstance(magnitude_range, Sequence) + and len(magnitude_range) == 2), \ + f'`magnitude_range` of RandAugment policy {type_name} ' \ + f'should be a Sequence with two numbers.' + + def _process_policies(self, policies): + processed_policies = [] + for policy in policies: + processed_policy = copy.deepcopy(policy) + magnitude_key = processed_policy.pop('magnitude_key', None) + if magnitude_key is not None: + magnitude = self.magnitude_level + # if magnitude_std is positive number or 'inf', move + # magnitude_value randomly. 
+ if self.magnitude_std == 'inf': + magnitude = random.uniform(0, magnitude) + elif self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + magnitude = min(self.total_level, max(0, magnitude)) + + val1, val2 = processed_policy.pop('magnitude_range') + magnitude = (magnitude / self.total_level) * (val2 - + val1) + val1 + + processed_policy.update({magnitude_key: magnitude}) + processed_policies.append(processed_policy) + return processed_policies + + def __call__(self, results): + if self.num_policies == 0: + return results + sub_policy = random.choices(self.policies, k=self.num_policies) + sub_policy = self._process_policies(sub_policy) + sub_policy = Compose(sub_policy) + return sub_policy(results) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(policies={self.policies}, ' + repr_str += f'num_policies={self.num_policies}, ' + repr_str += f'magnitude_level={self.magnitude_level}, ' + repr_str += f'total_level={self.total_level})' + return repr_str + + +@PIPELINES.register_module() +class Shear(object): + """Shear images. + + Args: + magnitude (int | float): The magnitude used for shear. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing Shear therefore should be + in range [0, 1]. Defaults to 0.5. + direction (str): The shearing direction. Options are 'horizontal' and + 'vertical'. Defaults to 'horizontal'. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'bicubic'. + """ + + def __init__(self, + magnitude, + pad_val=128, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\ + 'tuple must got elements of int type.' + else: + raise TypeError('pad_val must be int or tuple with 3 elements.') + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert direction in ('horizontal', 'vertical'), 'direction must be ' \ + f'either "horizontal" or "vertical", got {direction} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' 
+ + self.magnitude = magnitude + self.pad_val = tuple(pad_val) + self.prob = prob + self.direction = direction + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_sheared = mmcv.imshear( + img, + magnitude, + direction=self.direction, + border_value=self.pad_val, + interpolation=self.interpolation) + results[key] = img_sheared.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'direction={self.direction}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class Translate(object): + """Translate images. + + Args: + magnitude (int | float): The magnitude used for translate. Note that + the offset is calculated by magnitude * size in the corresponding + direction. With a magnitude of 1, the whole image will be moved out + of the range. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing translate therefore should + be in range [0, 1]. Defaults to 0.5. + direction (str): The translating direction. Options are 'horizontal' + and 'vertical'. Defaults to 'horizontal'. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'. + """ + + def __init__(self, + magnitude, + pad_val=128, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='nearest'): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\ + 'tuple must got elements of int type.' + else: + raise TypeError('pad_val must be int or tuple with 3 elements.') + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert direction in ('horizontal', 'vertical'), 'direction must be ' \ + f'either "horizontal" or "vertical", got {direction} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' 
+ + self.magnitude = magnitude + self.pad_val = tuple(pad_val) + self.prob = prob + self.direction = direction + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + height, width = img.shape[:2] + if self.direction == 'horizontal': + offset = magnitude * width + else: + offset = magnitude * height + img_translated = mmcv.imtranslate( + img, + offset, + direction=self.direction, + border_value=self.pad_val, + interpolation=self.interpolation) + results[key] = img_translated.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'direction={self.direction}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class Rotate(object): + """Rotate images. + + Args: + angle (float): The angle used for rotate. Positive values stand for + clockwise rotation. + center (tuple[float], optional): Center point (w, h) of the rotation in + the source image. If None, the center of the image will be used. + Defaults to None. + scale (float): Isotropic scale factor. Defaults to 1.0. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing Rotate therefore should be + in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the angle + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'. + """ + + def __init__(self, + angle, + center=None, + scale=1.0, + pad_val=128, + prob=0.5, + random_negative_prob=0.5, + interpolation='nearest'): + assert isinstance(angle, float), 'The angle type must be float, but ' \ + f'got {type(angle)} instead.' + if isinstance(center, tuple): + assert len(center) == 2, 'center as a tuple must have 2 ' \ + f'elements, got {len(center)} elements instead.' + else: + assert center is None, 'The center type' \ + f'must be tuple or None, got {type(center)} instead.' + assert isinstance(scale, float), 'the scale type must be float, but ' \ + f'got {type(scale)} instead.' + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\ + 'tuple must got elements of int type.' + else: + raise TypeError('pad_val must be int or tuple with 3 elements.') + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' 
+ + self.angle = angle + self.center = center + self.scale = scale + self.pad_val = tuple(pad_val) + self.prob = prob + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + angle = random_negative(self.angle, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_rotated = mmcv.imrotate( + img, + angle, + center=self.center, + scale=self.scale, + border_value=self.pad_val, + interpolation=self.interpolation) + results[key] = img_rotated.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(angle={self.angle}, ' + repr_str += f'center={self.center}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class AutoContrast(object): + """Auto adjust image contrast. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_contrasted = mmcv.auto_contrast(img) + results[key] = img_contrasted.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Invert(object): + """Invert images. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_inverted = mmcv.iminvert(img) + results[key] = img_inverted.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Equalize(object): + """Equalize the image histogram. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_equalized = mmcv.imequalize(img) + results[key] = img_equalized.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Solarize(object): + """Solarize images (invert all pixel values above a threshold). + + Args: + thr (int | float): The threshold above which the pixels value will be + inverted. 
+ prob (float): The probability for solarizing therefore should be in + range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, thr, prob=0.5): + assert isinstance(thr, (int, float)), 'The thr type must '\ + f'be int or float, but got {type(thr)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.thr = thr + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_solarized = mmcv.solarize(img, thr=self.thr) + results[key] = img_solarized.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(thr={self.thr}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class SolarizeAdd(object): + """SolarizeAdd images (add a certain value to pixels below a threshold). + + Args: + magnitude (int | float): The value to be added to pixels below the thr. + thr (int | float): The threshold below which the pixels value will be + adjusted. + prob (float): The probability for solarizing therefore should be in + range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, thr=128, prob=0.5): + assert isinstance(magnitude, (int, float)), 'The thr magnitude must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert isinstance(thr, (int, float)), 'The thr type must '\ + f'be int or float, but got {type(thr)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.magnitude = magnitude + self.thr = thr + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_solarized = np.where(img < self.thr, + np.minimum(img + self.magnitude, 255), + img) + results[key] = img_solarized.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'thr={self.thr}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Posterize(object): + """Posterize images (reduce the number of bits for each color channel). + + Args: + bits (int | float): Number of bits for each pixel in the output img, + which should be less or equal to 8. + prob (float): The probability for posterizing therefore should be in + range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, bits, prob=0.5): + assert bits <= 8, f'The bits must be less than 8, got {bits} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + # To align timm version, we need to round up to integer here. + self.bits = ceil(bits) + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_posterized = mmcv.posterize(img, bits=self.bits) + results[key] = img_posterized.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(bits={self.bits}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Contrast(object): + """Adjust images contrast. + + Args: + magnitude (int | float): The magnitude used for adjusting contrast. 
A + positive magnitude would enhance the contrast and a negative + magnitude would make the image grayer. A magnitude=0 gives the + origin img. + prob (float): The probability for performing contrast adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_contrasted = mmcv.adjust_contrast(img, factor=1 + magnitude) + results[key] = img_contrasted.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class ColorTransform(object): + """Adjust images color balance. + + Args: + magnitude (int | float): The magnitude used for color transform. A + positive magnitude would enhance the color and a negative magnitude + would make the image grayer. A magnitude=0 gives the origin img. + prob (float): The probability for performing ColorTransform therefore + should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_color_adjusted = mmcv.adjust_color(img, alpha=1 + magnitude) + results[key] = img_color_adjusted.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class Brightness(object): + """Adjust images brightness. + + Args: + magnitude (int | float): The magnitude used for adjusting brightness. A + positive magnitude would enhance the brightness and a negative + magnitude would make the image darker. A magnitude=0 gives the + origin img. 
+ prob (float): The probability for performing contrast adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_brightened = mmcv.adjust_brightness(img, factor=1 + magnitude) + results[key] = img_brightened.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class Sharpness(object): + """Adjust images sharpness. + + Args: + magnitude (int | float): The magnitude used for adjusting sharpness. A + positive magnitude would enhance the sharpness and a negative + magnitude would make the image bulr. A magnitude=0 gives the + origin img. + prob (float): The probability for performing contrast adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + magnitude = random_negative(self.magnitude, self.random_negative_prob) + for key in results.get('img_fields', ['img']): + img = results[key] + img_sharpened = mmcv.adjust_sharpness(img, factor=1 + magnitude) + results[key] = img_sharpened.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class Cutout(object): + """Cutout images. + + Args: + shape (int | float | tuple(int | float)): Expected cutout shape (h, w). + If given as a single value, the value will be used for + both h and w. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If it is a sequence, it must have the same length with the image + channels. Defaults to 128. 
+ prob (float): The probability for performing cutout therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, shape, pad_val=128, prob=0.5): + if isinstance(shape, float): + shape = int(shape) + elif isinstance(shape, tuple): + shape = tuple(int(i) for i in shape) + elif not isinstance(shape, int): + raise TypeError( + 'shape must be of ' + f'type int, float or tuple, got {type(shape)} instead') + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.shape = shape + self.pad_val = tuple(pad_val) + self.prob = prob + + def __call__(self, results): + if np.random.rand() > self.prob: + return results + for key in results.get('img_fields', ['img']): + img = results[key] + img_cutout = mmcv.cutout(img, self.shape, pad_val=self.pad_val) + results[key] = img_cutout.astype(img.dtype) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(shape={self.shape}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob})' + return repr_str diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/compose.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/compose.py new file mode 100644 index 0000000000..012d2b63b8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/compose.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections.abc import Sequence + +from mmcv.utils import build_from_cfg + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class Compose(object): + """Compose a data pipeline with a sequence of transforms. + + Args: + transforms (list[dict | callable]): + Either config dicts of transforms or transform objects. + """ + + def __init__(self, transforms): + assert isinstance(transforms, Sequence) + self.transforms = [] + for transform in transforms: + if isinstance(transform, dict): + transform = build_from_cfg(transform, PIPELINES) + self.transforms.append(transform) + elif callable(transform): + self.transforms.append(transform) + else: + raise TypeError('transform must be callable or a dict, but got' + f' {type(transform)}') + + def __call__(self, data): + for t in self.transforms: + data = t(data) + if data is None: + return None + return data + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += f'\n {t}' + format_string += '\n)' + return format_string diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formating.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formating.py new file mode 100644 index 0000000000..555e38244b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formating.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
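The photometric ops above (Contrast, ColorTransform, Brightness, Sharpness) share a convention that is easy to miss: after the optional sign flip from random_negative, the magnitude becomes an mmcv enhancement factor of 1 + magnitude. A small sketch of that mapping, not code from this patch:

import numpy as np

def enhancement_factor(magnitude, random_negative_prob=0.5):
    # Mirrors Contrast/ColorTransform/Brightness/Sharpness above: flip the sign
    # with probability random_negative_prob, then pass 1 + m as the factor to
    # the corresponding mmcv.adjust_* call.
    m = -magnitude if np.random.rand() < random_negative_prob else magnitude
    return 1 + m

# magnitude=0.3 yields factor 1.3 (stronger effect) or 0.7 (weaker: grayer,
# darker or blurrier, depending on the op); magnitude=0 always yields 1.0,
# i.e. the original image.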
+# flake8: noqa +import warnings + +from .formatting import * + +warnings.warn('DeprecationWarning: mmcls.datasets.pipelines.formating will be ' + 'deprecated in 2021, please replace it with ' + 'mmcls.datasets.pipelines.formatting.') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formatting.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formatting.py new file mode 100644 index 0000000000..0eee667efc --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/formatting.py @@ -0,0 +1,180 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections.abc import Sequence + +import mmcv +import numpy as np +import torch +from mmcv.parallel import DataContainer as DC +from PIL import Image + +from ..builder import PIPELINES + + +def to_tensor(data): + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. + """ + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, np.ndarray): + return torch.from_numpy(data) + elif isinstance(data, Sequence) and not mmcv.is_str(data): + return torch.tensor(data) + elif isinstance(data, int): + return torch.LongTensor([data]) + elif isinstance(data, float): + return torch.FloatTensor([data]) + else: + raise TypeError( + f'Type {type(data)} cannot be converted to tensor.' + 'Supported types are: `numpy.ndarray`, `torch.Tensor`, ' + '`Sequence`, `int` and `float`') + + +@PIPELINES.register_module() +class ToTensor(object): + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + for key in self.keys: + results[key] = to_tensor(results[key]) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class ImageToTensor(object): + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + for key in self.keys: + img = results[key] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + results[key] = to_tensor(img.transpose(2, 0, 1)) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class Transpose(object): + + def __init__(self, keys, order): + self.keys = keys + self.order = order + + def __call__(self, results): + for key in self.keys: + results[key] = results[key].transpose(self.order) + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, order={self.order})' + + +@PIPELINES.register_module() +class ToPIL(object): + + def __init__(self): + pass + + def __call__(self, results): + results['img'] = Image.fromarray(results['img']) + return results + + +@PIPELINES.register_module() +class ToNumpy(object): + + def __init__(self): + pass + + def __call__(self, results): + results['img'] = np.array(results['img'], dtype=np.float32) + return results + + +@PIPELINES.register_module() +class Collect(object): + """Collect data from the loader relevant to the specific task. + + This is usually the last stage of the data loader pipeline. Typically keys + is set to some subset of "img" and "gt_label". + + Args: + keys (Sequence[str]): Keys of results to be collected in ``data``. + meta_keys (Sequence[str], optional): Meta keys to be converted to + ``mmcv.DataContainer`` and collected in ``data[img_metas]``. 
+ Default: ('filename', 'ori_shape', 'img_shape', 'flip', + 'flip_direction', 'img_norm_cfg') + + Returns: + dict: The result dict contains the following keys + + - keys in ``self.keys`` + - ``img_metas`` if available + """ + + def __init__(self, + keys, + meta_keys=('filename', 'ori_filename', 'ori_shape', + 'img_shape', 'flip', 'flip_direction', + 'img_norm_cfg')): + self.keys = keys + self.meta_keys = meta_keys + + def __call__(self, results): + data = {} + img_meta = {} + for key in self.meta_keys: + if key in results: + img_meta[key] = results[key] + data['img_metas'] = DC(img_meta, cpu_only=True) + for key in self.keys: + data[key] = results[key] + return data + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, meta_keys={self.meta_keys})' + + +@PIPELINES.register_module() +class WrapFieldsToLists(object): + """Wrap fields of the data dictionary into lists for evaluation. + + This class can be used as a last step of a test or validation + pipeline for single image evaluation or inference. + + Example: + >>> test_pipeline = [ + >>> dict(type='LoadImageFromFile'), + >>> dict(type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + >>> dict(type='ImageToTensor', keys=['img']), + >>> dict(type='Collect', keys=['img']), + >>> dict(type='WrapIntoLists') + >>> ] + """ + + def __call__(self, results): + # Wrap dict fields into lists + for key, val in results.items(): + results[key] = [val] + return results + + def __repr__(self): + return f'{self.__class__.__name__}()' diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/loading.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/loading.py new file mode 100644 index 0000000000..b5d8e95d76 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/loading.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +import mmcv +import numpy as np + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class LoadImageFromFile(object): + """Load an image from file. + + Required keys are "img_prefix" and "img_info" (a dict that must contain the + key "filename"). Added or updated keys are "filename", "img", "img_shape", + "ori_shape" (same as `img_shape`) and "img_norm_cfg" (means=0 and stds=1). + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:`mmcv.imfrombytes()`. + Defaults to 'color'. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. 
+ """ + + def __init__(self, + to_float32=False, + color_type='color', + file_client_args=dict(backend='disk')): + self.to_float32 = to_float32 + self.color_type = color_type + self.file_client_args = file_client_args.copy() + self.file_client = None + + def __call__(self, results): + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + + if results['img_prefix'] is not None: + filename = osp.join(results['img_prefix'], + results['img_info']['filename']) + else: + filename = results['img_info']['filename'] + + img_bytes = self.file_client.get(filename) + img = mmcv.imfrombytes(img_bytes, flag=self.color_type) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + results['ori_filename'] = results['img_info']['filename'] + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'to_float32={self.to_float32}, ' + f"color_type='{self.color_type}', " + f'file_client_args={self.file_client_args})') + return repr_str diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/transforms.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/transforms.py new file mode 100644 index 0000000000..d864ae81be --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/pipelines/transforms.py @@ -0,0 +1,1065 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect +import math +import random +from numbers import Number +from typing import Sequence + +import mmcv +import numpy as np + +from ..builder import PIPELINES +from .compose import Compose + +try: + import albumentations +except ImportError: + albumentations = None + + +@PIPELINES.register_module() +class RandomCrop(object): + """Crop the given Image at a random location. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + padding (int or sequence, optional): Optional padding on each border + of the image. If a sequence of length 4 is provided, it is used to + pad left, top, right, bottom borders respectively. If a sequence + of length 2 is provided, it is used to pad left/right, top/bottom + borders, respectively. Default: None, which means no padding. + pad_if_needed (boolean): It will pad the image if smaller than the + desired size to avoid raising an exception. Since cropping is done + after padding, the padding seems to be done at a random offset. + Default: False. + pad_val (Number | Sequence[Number]): Pixel pad_val value for constant + fill. If a tuple of length 3, it is used to pad_val R, G, B + channels respectively. Default: 0. + padding_mode (str): Type of padding. Defaults to "constant". Should + be one of the following: + + - constant: Pads with a constant value, this value is specified \ + with pad_val. + - edge: pads with the last value at the edge of the image. + - reflect: Pads with reflection of image without repeating the \ + last value on the edge. For example, padding [1, 2, 3, 4] \ + with 2 elements on both sides in reflect mode will result \ + in [3, 2, 1, 2, 3, 4, 3, 2]. 
+ - symmetric: Pads with reflection of image repeating the last \ + value on the edge. For example, padding [1, 2, 3, 4] with \ + 2 elements on both sides in symmetric mode will result in \ + [2, 1, 1, 2, 3, 4, 4, 3]. + """ + + def __init__(self, + size, + padding=None, + pad_if_needed=False, + pad_val=0, + padding_mode='constant'): + if isinstance(size, (tuple, list)): + self.size = size + else: + self.size = (size, size) + # check padding mode + assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] + self.padding = padding + self.pad_if_needed = pad_if_needed + self.pad_val = pad_val + self.padding_mode = padding_mode + + @staticmethod + def get_params(img, output_size): + """Get parameters for ``crop`` for a random crop. + + Args: + img (ndarray): Image to be cropped. + output_size (tuple): Expected output size of the crop. + + Returns: + tuple: Params (xmin, ymin, target_height, target_width) to be + passed to ``crop`` for random crop. + """ + height = img.shape[0] + width = img.shape[1] + target_height, target_width = output_size + if width == target_width and height == target_height: + return 0, 0, height, width + + ymin = random.randint(0, height - target_height) + xmin = random.randint(0, width - target_width) + return ymin, xmin, target_height, target_width + + def __call__(self, results): + """ + Args: + img (ndarray): Image to be cropped. + """ + for key in results.get('img_fields', ['img']): + img = results[key] + if self.padding is not None: + img = mmcv.impad( + img, padding=self.padding, pad_val=self.pad_val) + + # pad the height if needed + if self.pad_if_needed and img.shape[0] < self.size[0]: + img = mmcv.impad( + img, + padding=(0, self.size[0] - img.shape[0], 0, + self.size[0] - img.shape[0]), + pad_val=self.pad_val, + padding_mode=self.padding_mode) + + # pad the width if needed + if self.pad_if_needed and img.shape[1] < self.size[1]: + img = mmcv.impad( + img, + padding=(self.size[1] - img.shape[1], 0, + self.size[1] - img.shape[1], 0), + pad_val=self.pad_val, + padding_mode=self.padding_mode) + + ymin, xmin, height, width = self.get_params(img, self.size) + results[key] = mmcv.imcrop( + img, + np.array([ + xmin, + ymin, + xmin + width - 1, + ymin + height - 1, + ])) + return results + + def __repr__(self): + return (self.__class__.__name__ + + f'(size={self.size}, padding={self.padding})') + + +@PIPELINES.register_module() +class RandomResizedCrop(object): + """Crop the given image to random size and aspect ratio. + + A crop of random size (default: of 0.08 to 1.0) of the original size and a + random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio + is made. This crop is finally resized to given size. + + Args: + size (sequence | int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + scale (tuple): Range of the random size of the cropped image compared + to the original image. Defaults to (0.08, 1.0). + ratio (tuple): Range of the random aspect ratio of the cropped image + compared to the original image. Defaults to (3. / 4., 4. / 3.). + max_attempts (int): Maximum number of attempts before falling back to + Central Crop. Defaults to 10. + efficientnet_style (bool): Whether to use efficientnet style Random + ResizedCrop. Defaults to False. + min_covered (Number): Minimum ratio of the cropped area to the original + area. Only valid if efficientnet_style is true. Defaults to 0.1. 
+ crop_padding (int): The crop padding parameter in efficientnet style + center crop. Only valid if efficientnet_style is true. + Defaults to 32. + interpolation (str): Interpolation method, accepted values are + 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to + 'bilinear'. + backend (str): The image resize backend type, accepted values are + `cv2` and `pillow`. Defaults to `cv2`. + """ + + def __init__(self, + size, + scale=(0.08, 1.0), + ratio=(3. / 4., 4. / 3.), + max_attempts=10, + efficientnet_style=False, + min_covered=0.1, + crop_padding=32, + interpolation='bilinear', + backend='cv2'): + if efficientnet_style: + assert isinstance(size, int) + self.size = (size, size) + assert crop_padding >= 0 + else: + if isinstance(size, (tuple, list)): + self.size = size + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + raise ValueError('range should be of kind (min, max). ' + f'But received scale {scale} and rato {ratio}.') + assert min_covered >= 0, 'min_covered should be no less than 0.' + assert isinstance(max_attempts, int) and max_attempts >= 0, \ + 'max_attempts mush be int and no less than 0.' + assert interpolation in ('nearest', 'bilinear', 'bicubic', 'area', + 'lanczos') + if backend not in ['cv2', 'pillow']: + raise ValueError(f'backend: {backend} is not supported for resize.' + 'Supported backends are "cv2", "pillow"') + + self.scale = scale + self.ratio = ratio + self.max_attempts = max_attempts + self.efficientnet_style = efficientnet_style + self.min_covered = min_covered + self.crop_padding = crop_padding + self.interpolation = interpolation + self.backend = backend + + @staticmethod + def get_params(img, scale, ratio, max_attempts=10): + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (ndarray): Image to be cropped. + scale (tuple): Range of the random size of the cropped image + compared to the original image size. + ratio (tuple): Range of the random aspect ratio of the cropped + image compared to the original image area. + max_attempts (int): Maximum number of attempts before falling back + to central crop. Defaults to 10. + + Returns: + tuple: Params (ymin, xmin, ymax, xmax) to be passed to `crop` for + a random sized crop. 
+ """ + height = img.shape[0] + width = img.shape[1] + area = height * width + + for _ in range(max_attempts): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + target_width = int(round(math.sqrt(target_area * aspect_ratio))) + target_height = int(round(math.sqrt(target_area / aspect_ratio))) + + if 0 < target_width <= width and 0 < target_height <= height: + ymin = random.randint(0, height - target_height) + xmin = random.randint(0, width - target_width) + ymax = ymin + target_height - 1 + xmax = xmin + target_width - 1 + return ymin, xmin, ymax, xmax + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + target_width = width + target_height = int(round(target_width / min(ratio))) + elif in_ratio > max(ratio): + target_height = height + target_width = int(round(target_height * max(ratio))) + else: # whole image + target_width = width + target_height = height + ymin = (height - target_height) // 2 + xmin = (width - target_width) // 2 + ymax = ymin + target_height - 1 + xmax = xmin + target_width - 1 + return ymin, xmin, ymax, xmax + + # https://github.com/kakaobrain/fast-autoaugment/blob/master/FastAutoAugment/data.py # noqa + @staticmethod + def get_params_efficientnet_style(img, + size, + scale, + ratio, + max_attempts=10, + min_covered=0.1, + crop_padding=32): + """Get parameters for ``crop`` for a random sized crop in efficientnet + style. + + Args: + img (ndarray): Image to be cropped. + size (sequence): Desired output size of the crop. + scale (tuple): Range of the random size of the cropped image + compared to the original image size. + ratio (tuple): Range of the random aspect ratio of the cropped + image compared to the original image area. + max_attempts (int): Maximum number of attempts before falling back + to central crop. Defaults to 10. + min_covered (Number): Minimum ratio of the cropped area to the + original area. Only valid if efficientnet_style is true. + Defaults to 0.1. + crop_padding (int): The crop padding parameter in efficientnet + style center crop. Defaults to 32. + + Returns: + tuple: Params (ymin, xmin, ymax, xmax) to be passed to `crop` for + a random sized crop. + """ + height, width = img.shape[:2] + area = height * width + min_target_area = scale[0] * area + max_target_area = scale[1] * area + + for _ in range(max_attempts): + aspect_ratio = random.uniform(*ratio) + min_target_height = int( + round(math.sqrt(min_target_area / aspect_ratio))) + max_target_height = int( + round(math.sqrt(max_target_area / aspect_ratio))) + + if max_target_height * aspect_ratio > width: + max_target_height = int((width + 0.5 - 1e-7) / aspect_ratio) + if max_target_height * aspect_ratio > width: + max_target_height -= 1 + + max_target_height = min(max_target_height, height) + min_target_height = min(max_target_height, min_target_height) + + # slightly differs from tf implementation + target_height = int( + round(random.uniform(min_target_height, max_target_height))) + target_width = int(round(target_height * aspect_ratio)) + target_area = target_height * target_width + + # slight differs from tf. 
In tf, if target_area > max_target_area, + # area will be recalculated + if (target_area < min_target_area or target_area > max_target_area + or target_width > width or target_height > height + or target_area < min_covered * area): + continue + + ymin = random.randint(0, height - target_height) + xmin = random.randint(0, width - target_width) + ymax = ymin + target_height - 1 + xmax = xmin + target_width - 1 + + return ymin, xmin, ymax, xmax + + # Fallback to central crop + img_short = min(height, width) + crop_size = size[0] / (size[0] + crop_padding) * img_short + + ymin = max(0, int(round((height - crop_size) / 2.))) + xmin = max(0, int(round((width - crop_size) / 2.))) + ymax = min(height, ymin + crop_size) - 1 + xmax = min(width, xmin + crop_size) - 1 + + return ymin, xmin, ymax, xmax + + def __call__(self, results): + for key in results.get('img_fields', ['img']): + img = results[key] + if self.efficientnet_style: + get_params_func = self.get_params_efficientnet_style + get_params_args = dict( + img=img, + size=self.size, + scale=self.scale, + ratio=self.ratio, + max_attempts=self.max_attempts, + min_covered=self.min_covered, + crop_padding=self.crop_padding) + else: + get_params_func = self.get_params + get_params_args = dict( + img=img, + scale=self.scale, + ratio=self.ratio, + max_attempts=self.max_attempts) + ymin, xmin, ymax, xmax = get_params_func(**get_params_args) + img = mmcv.imcrop(img, bboxes=np.array([xmin, ymin, xmax, ymax])) + results[key] = mmcv.imresize( + img, + tuple(self.size[::-1]), + interpolation=self.interpolation, + backend=self.backend) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + f'(size={self.size}' + repr_str += f', scale={tuple(round(s, 4) for s in self.scale)}' + repr_str += f', ratio={tuple(round(r, 4) for r in self.ratio)}' + repr_str += f', max_attempts={self.max_attempts}' + repr_str += f', efficientnet_style={self.efficientnet_style}' + repr_str += f', min_covered={self.min_covered}' + repr_str += f', crop_padding={self.crop_padding}' + repr_str += f', interpolation={self.interpolation}' + repr_str += f', backend={self.backend})' + return repr_str + + +@PIPELINES.register_module() +class RandomGrayscale(object): + """Randomly convert image to grayscale with a probability of gray_prob. + + Args: + gray_prob (float): Probability that image should be converted to + grayscale. Default: 0.1. + + Returns: + ndarray: Image after randomly grayscale transform. + + Notes: + - If input image is 1 channel: grayscale version is 1 channel. + - If input image is 3 channel: grayscale version is 3 channel + with r == g == b. + """ + + def __init__(self, gray_prob=0.1): + self.gray_prob = gray_prob + + def __call__(self, results): + """ + Args: + img (ndarray): Image to be converted to grayscale. + + Returns: + ndarray: Randomly grayscaled image. + """ + for key in results.get('img_fields', ['img']): + img = results[key] + num_output_channels = img.shape[2] + if random.random() < self.gray_prob: + if num_output_channels > 1: + img = mmcv.rgb2gray(img)[:, :, None] + results[key] = np.dstack( + [img for _ in range(num_output_channels)]) + return results + results[key] = img + return results + + def __repr__(self): + return self.__class__.__name__ + f'(gray_prob={self.gray_prob})' + + +@PIPELINES.register_module() +class RandomFlip(object): + """Flip the image randomly. + + Flip the image randomly based on flip probaility and flip direction. + + Args: + flip_prob (float): probability of the image being flipped. 
Default: 0.5 + direction (str): The flipping direction. Options are + 'horizontal' and 'vertical'. Default: 'horizontal'. + """ + + def __init__(self, flip_prob=0.5, direction='horizontal'): + assert 0 <= flip_prob <= 1 + assert direction in ['horizontal', 'vertical'] + self.flip_prob = flip_prob + self.direction = direction + + def __call__(self, results): + """Call function to flip image. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Flipped results, 'flip', 'flip_direction' keys are added into + result dict. + """ + flip = True if np.random.rand() < self.flip_prob else False + results['flip'] = flip + results['flip_direction'] = self.direction + if results['flip']: + # flip image + for key in results.get('img_fields', ['img']): + results[key] = mmcv.imflip( + results[key], direction=results['flip_direction']) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(flip_prob={self.flip_prob})' + + +@PIPELINES.register_module() +class RandomErasing(object): + """Randomly selects a rectangle region in an image and erase pixels. + + Args: + erase_prob (float): Probability that image will be randomly erased. + Default: 0.5 + min_area_ratio (float): Minimum erased area / input image area + Default: 0.02 + max_area_ratio (float): Maximum erased area / input image area + Default: 0.4 + aspect_range (sequence | float): Aspect ratio range of erased area. + if float, it will be converted to (aspect_ratio, 1/aspect_ratio) + Default: (3/10, 10/3) + mode (str): Fill method in erased area, can be: + + - const (default): All pixels are assign with the same value. + - rand: each pixel is assigned with a random value in [0, 255] + + fill_color (sequence | Number): Base color filled in erased area. + Defaults to (128, 128, 128). + fill_std (sequence | Number, optional): If set and ``mode`` is 'rand', + fill erased area with random color from normal distribution + (mean=fill_color, std=fill_std); If not set, fill erased area with + random color from uniform distribution (0~255). Defaults to None. + + Note: + See `Random Erasing Data Augmentation + `_ + + This paper provided 4 modes: RE-R, RE-M, RE-0, RE-255, and use RE-M as + default. The config of these 4 modes are: + + - RE-R: RandomErasing(mode='rand') + - RE-M: RandomErasing(mode='const', fill_color=(123.67, 116.3, 103.5)) + - RE-0: RandomErasing(mode='const', fill_color=0) + - RE-255: RandomErasing(mode='const', fill_color=255) + """ + + def __init__(self, + erase_prob=0.5, + min_area_ratio=0.02, + max_area_ratio=0.4, + aspect_range=(3 / 10, 10 / 3), + mode='const', + fill_color=(128, 128, 128), + fill_std=None): + assert isinstance(erase_prob, float) and 0. <= erase_prob <= 1. + assert isinstance(min_area_ratio, float) and 0. <= min_area_ratio <= 1. + assert isinstance(max_area_ratio, float) and 0. <= max_area_ratio <= 1. + assert min_area_ratio <= max_area_ratio, \ + 'min_area_ratio should be smaller than max_area_ratio' + if isinstance(aspect_range, float): + aspect_range = min(aspect_range, 1 / aspect_range) + aspect_range = (aspect_range, 1 / aspect_range) + assert isinstance(aspect_range, Sequence) and len(aspect_range) == 2 \ + and all(isinstance(x, float) for x in aspect_range), \ + 'aspect_range should be a float or Sequence with two float.' + assert all(x > 0 for x in aspect_range), \ + 'aspect_range should be positive.' + assert aspect_range[0] <= aspect_range[1], \ + 'In aspect_range (min, max), min should be smaller than max.' 
+ assert mode in ['const', 'rand'] + if isinstance(fill_color, Number): + fill_color = [fill_color] * 3 + assert isinstance(fill_color, Sequence) and len(fill_color) == 3 \ + and all(isinstance(x, Number) for x in fill_color), \ + 'fill_color should be a float or Sequence with three int.' + if fill_std is not None: + if isinstance(fill_std, Number): + fill_std = [fill_std] * 3 + assert isinstance(fill_std, Sequence) and len(fill_std) == 3 \ + and all(isinstance(x, Number) for x in fill_std), \ + 'fill_std should be a float or Sequence with three int.' + + self.erase_prob = erase_prob + self.min_area_ratio = min_area_ratio + self.max_area_ratio = max_area_ratio + self.aspect_range = aspect_range + self.mode = mode + self.fill_color = fill_color + self.fill_std = fill_std + + def _fill_pixels(self, img, top, left, h, w): + if self.mode == 'const': + patch = np.empty((h, w, 3), dtype=np.uint8) + patch[:, :] = np.array(self.fill_color, dtype=np.uint8) + elif self.fill_std is None: + # Uniform distribution + patch = np.random.uniform(0, 256, (h, w, 3)).astype(np.uint8) + else: + # Normal distribution + patch = np.random.normal(self.fill_color, self.fill_std, (h, w, 3)) + patch = np.clip(patch.astype(np.int32), 0, 255).astype(np.uint8) + + img[top:top + h, left:left + w] = patch + return img + + def __call__(self, results): + """ + Args: + results (dict): Results dict from pipeline + + Returns: + dict: Results after the transformation. + """ + for key in results.get('img_fields', ['img']): + if np.random.rand() > self.erase_prob: + continue + img = results[key] + img_h, img_w = img.shape[:2] + + # convert to log aspect to ensure equal probability of aspect ratio + log_aspect_range = np.log( + np.array(self.aspect_range, dtype=np.float32)) + aspect_ratio = np.exp(np.random.uniform(*log_aspect_range)) + area = img_h * img_w + area *= np.random.uniform(self.min_area_ratio, self.max_area_ratio) + + h = min(int(round(np.sqrt(area * aspect_ratio))), img_h) + w = min(int(round(np.sqrt(area / aspect_ratio))), img_w) + top = np.random.randint(0, img_h - h) if img_h > h else 0 + left = np.random.randint(0, img_w - w) if img_w > w else 0 + img = self._fill_pixels(img, top, left, h, w) + + results[key] = img + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(erase_prob={self.erase_prob}, ' + repr_str += f'min_area_ratio={self.min_area_ratio}, ' + repr_str += f'max_area_ratio={self.max_area_ratio}, ' + repr_str += f'aspect_range={self.aspect_range}, ' + repr_str += f'mode={self.mode}, ' + repr_str += f'fill_color={self.fill_color}, ' + repr_str += f'fill_std={self.fill_std})' + return repr_str + + +@PIPELINES.register_module() +class Resize(object): + """Resize images. + + Args: + size (int | tuple): Images scales for resizing (h, w). + When size is int, the default behavior is to resize an image + to (size, size). When size is tuple and the second value is -1, + the short edge of an image is resized to its first value. + For example, when size is 224, the image is resized to 224x224. + When size is (224, -1), the short side is resized to 224 and the + other side is computed based on the short side, maintaining the + aspect ratio. + interpolation (str): Interpolation method, accepted values are + "nearest", "bilinear", "bicubic", "area", "lanczos". + More details can be found in `mmcv.image.geometric`. + backend (str): The image resize backend type, accepted values are + `cv2` and `pillow`. Default: `cv2`. 
+ """ + + def __init__(self, size, interpolation='bilinear', backend='cv2'): + assert isinstance(size, int) or (isinstance(size, tuple) + and len(size) == 2) + self.resize_w_short_side = False + if isinstance(size, int): + assert size > 0 + size = (size, size) + else: + assert size[0] > 0 and (size[1] > 0 or size[1] == -1) + if size[1] == -1: + self.resize_w_short_side = True + assert interpolation in ('nearest', 'bilinear', 'bicubic', 'area', + 'lanczos') + if backend not in ['cv2', 'pillow']: + raise ValueError(f'backend: {backend} is not supported for resize.' + 'Supported backends are "cv2", "pillow"') + + self.size = size + self.interpolation = interpolation + self.backend = backend + + def _resize_img(self, results): + for key in results.get('img_fields', ['img']): + img = results[key] + ignore_resize = False + if self.resize_w_short_side: + h, w = img.shape[:2] + short_side = self.size[0] + if (w <= h and w == short_side) or (h <= w + and h == short_side): + ignore_resize = True + else: + if w < h: + width = short_side + height = int(short_side * h / w) + else: + height = short_side + width = int(short_side * w / h) + else: + height, width = self.size + if not ignore_resize: + img = mmcv.imresize( + img, + size=(width, height), + interpolation=self.interpolation, + return_scale=False, + backend=self.backend) + results[key] = img + results['img_shape'] = img.shape + + def __call__(self, results): + self._resize_img(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class CenterCrop(object): + r"""Center crop the image. + + Args: + crop_size (int | tuple): Expected size after cropping with the format + of (h, w). + efficientnet_style (bool): Whether to use efficientnet style center + crop. Defaults to False. + crop_padding (int): The crop padding parameter in efficientnet style + center crop. Only valid if efficientnet style is True. Defaults to + 32. + interpolation (str): Interpolation method, accepted values are + 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Only valid if + ``efficientnet_style`` is True. Defaults to 'bilinear'. + backend (str): The image resize backend type, accepted values are + `cv2` and `pillow`. Only valid if efficientnet style is True. + Defaults to `cv2`. + + + Notes: + - If the image is smaller than the crop size, return the original + image. + - If efficientnet_style is set to False, the pipeline would be a simple + center crop using the crop_size. + - If efficientnet_style is set to True, the pipeline will be to first + to perform the center crop with the ``crop_size_`` as: + + .. math:: + \text{crop\_size\_} = \frac{\text{crop\_size}}{\text{crop\_size} + + \text{crop\_padding}} \times \text{short\_edge} + + And then the pipeline resizes the img to the input crop size. + """ + + def __init__(self, + crop_size, + efficientnet_style=False, + crop_padding=32, + interpolation='bilinear', + backend='cv2'): + if efficientnet_style: + assert isinstance(crop_size, int) + assert crop_padding >= 0 + assert interpolation in ('nearest', 'bilinear', 'bicubic', 'area', + 'lanczos') + if backend not in ['cv2', 'pillow']: + raise ValueError( + f'backend: {backend} is not supported for ' + 'resize. 
Supported backends are "cv2", "pillow"') + else: + assert isinstance(crop_size, int) or (isinstance(crop_size, tuple) + and len(crop_size) == 2) + if isinstance(crop_size, int): + crop_size = (crop_size, crop_size) + assert crop_size[0] > 0 and crop_size[1] > 0 + self.crop_size = crop_size + self.efficientnet_style = efficientnet_style + self.crop_padding = crop_padding + self.interpolation = interpolation + self.backend = backend + + def __call__(self, results): + crop_height, crop_width = self.crop_size[0], self.crop_size[1] + for key in results.get('img_fields', ['img']): + img = results[key] + # img.shape has length 2 for grayscale, length 3 for color + img_height, img_width = img.shape[:2] + + # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/preprocessing.py#L118 # noqa + if self.efficientnet_style: + img_short = min(img_height, img_width) + crop_height = crop_height / (crop_height + + self.crop_padding) * img_short + crop_width = crop_width / (crop_width + + self.crop_padding) * img_short + + y1 = max(0, int(round((img_height - crop_height) / 2.))) + x1 = max(0, int(round((img_width - crop_width) / 2.))) + y2 = min(img_height, y1 + crop_height) - 1 + x2 = min(img_width, x1 + crop_width) - 1 + + # crop the image + img = mmcv.imcrop(img, bboxes=np.array([x1, y1, x2, y2])) + + if self.efficientnet_style: + img = mmcv.imresize( + img, + tuple(self.crop_size[::-1]), + interpolation=self.interpolation, + backend=self.backend) + img_shape = img.shape + results[key] = img + results['img_shape'] = img_shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + f'(crop_size={self.crop_size}' + repr_str += f', efficientnet_style={self.efficientnet_style}' + repr_str += f', crop_padding={self.crop_padding}' + repr_str += f', interpolation={self.interpolation}' + repr_str += f', backend={self.backend})' + return repr_str + + +@PIPELINES.register_module() +class Normalize(object): + """Normalize the image. + + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. + """ + + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + + def __call__(self, results): + for key in results.get('img_fields', ['img']): + results[key] = mmcv.imnormalize(results[key], self.mean, self.std, + self.to_rgb) + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={list(self.mean)}, ' + repr_str += f'std={list(self.std)}, ' + repr_str += f'to_rgb={self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class ColorJitter(object): + """Randomly change the brightness, contrast and saturation of an image. + + Args: + brightness (float): How much to jitter brightness. + brightness_factor is chosen uniformly from + [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. + contrast_factor is chosen uniformly from + [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. + saturation_factor is chosen uniformly from + [max(0, 1 - saturation), 1 + saturation]. 
+ """ + + def __init__(self, brightness, contrast, saturation): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + + def __call__(self, results): + brightness_factor = random.uniform(0, self.brightness) + contrast_factor = random.uniform(0, self.contrast) + saturation_factor = random.uniform(0, self.saturation) + color_jitter_transforms = [ + dict( + type='Brightness', + magnitude=brightness_factor, + prob=1., + random_negative_prob=0.5), + dict( + type='Contrast', + magnitude=contrast_factor, + prob=1., + random_negative_prob=0.5), + dict( + type='ColorTransform', + magnitude=saturation_factor, + prob=1., + random_negative_prob=0.5) + ] + random.shuffle(color_jitter_transforms) + transform = Compose(color_jitter_transforms) + return transform(results) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(brightness={self.brightness}, ' + repr_str += f'contrast={self.contrast}, ' + repr_str += f'saturation={self.saturation})' + return repr_str + + +@PIPELINES.register_module() +class Lighting(object): + """Adjust images lighting using AlexNet-style PCA jitter. + + Args: + eigval (list): the eigenvalue of the convariance matrix of pixel + values, respectively. + eigvec (list[list]): the eigenvector of the convariance matrix of pixel + values, respectively. + alphastd (float): The standard deviation for distribution of alpha. + Defaults to 0.1 + to_rgb (bool): Whether to convert img to rgb. + """ + + def __init__(self, eigval, eigvec, alphastd=0.1, to_rgb=True): + assert isinstance(eigval, list), \ + f'eigval must be of type list, got {type(eigval)} instead.' + assert isinstance(eigvec, list), \ + f'eigvec must be of type list, got {type(eigvec)} instead.' + for vec in eigvec: + assert isinstance(vec, list) and len(vec) == len(eigvec[0]), \ + 'eigvec must contains lists with equal length.' + self.eigval = np.array(eigval) + self.eigvec = np.array(eigvec) + self.alphastd = alphastd + self.to_rgb = to_rgb + + def __call__(self, results): + for key in results.get('img_fields', ['img']): + img = results[key] + results[key] = mmcv.adjust_lighting( + img, + self.eigval, + self.eigvec, + alphastd=self.alphastd, + to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(eigval={self.eigval.tolist()}, ' + repr_str += f'eigvec={self.eigvec.tolist()}, ' + repr_str += f'alphastd={self.alphastd}, ' + repr_str += f'to_rgb={self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class Albu(object): + """Albumentation augmentation. + + Adds custom transformations from Albumentations library. + Please, visit `https://albumentations.readthedocs.io` + to get more information. + An example of ``transforms`` is as followed: + + .. 
code-block:: + [ + dict( + type='ShiftScaleRotate', + shift_limit=0.0625, + scale_limit=0.0, + rotate_limit=0, + interpolation=1, + p=0.5), + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), + ] + + Args: + transforms (list[dict]): A list of albu transformations + keymap (dict): Contains {'input key':'albumentation-style key'} + """ + + def __init__(self, transforms, keymap=None, update_pad_shape=False): + if albumentations is None: + raise RuntimeError('albumentations is not installed') + else: + from albumentations import Compose + + self.transforms = transforms + self.filter_lost_elements = False + self.update_pad_shape = update_pad_shape + + self.aug = Compose([self.albu_builder(t) for t in self.transforms]) + + if not keymap: + self.keymap_to_albu = { + 'img': 'image', + } + else: + self.keymap_to_albu = keymap + self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} + + def albu_builder(self, cfg): + """Import a module from albumentations. + + It inherits some of :func:`build_from_cfg` logic. + Args: + cfg (dict): Config dict. It should at least contain the key "type". + Returns: + obj: The constructed object. + """ + + assert isinstance(cfg, dict) and 'type' in cfg + args = cfg.copy() + + obj_type = args.pop('type') + if mmcv.is_str(obj_type): + if albumentations is None: + raise RuntimeError('albumentations is not installed') + obj_cls = getattr(albumentations, obj_type) + elif inspect.isclass(obj_type): + obj_cls = obj_type + else: + raise TypeError( + f'type must be a str or valid type, but got {type(obj_type)}') + + if 'transforms' in args: + args['transforms'] = [ + self.albu_builder(transform) + for transform in args['transforms'] + ] + + return obj_cls(**args) + + @staticmethod + def mapper(d, keymap): + """Dictionary mapper. + + Renames keys according to keymap provided. + Args: + d (dict): old dict + keymap (dict): {'old_key':'new_key'} + Returns: + dict: new dict. + """ + + updated_dict = {} + for k, v in zip(d.keys(), d.values()): + new_k = keymap.get(k, k) + updated_dict[new_k] = d[k] + return updated_dict + + def __call__(self, results): + # dict to albumentations format + results = self.mapper(results, self.keymap_to_albu) + + results = self.aug(**results) + + if 'gt_labels' in results: + if isinstance(results['gt_labels'], list): + results['gt_labels'] = np.array(results['gt_labels']) + results['gt_labels'] = results['gt_labels'].astype(np.int64) + + # back to the original format + results = self.mapper(results, self.keymap_back) + + # update final shape + if self.update_pad_shape: + results['pad_shape'] = results['img'].shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' + return repr_str diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/__init__.py new file mode 100644 index 0000000000..da09effaf2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
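As a concrete illustration of the Albu wrapper above, a hypothetical training-pipeline config written in the same style as the configs in this repository. It assumes the albumentations package is installed; RandomBrightnessContrast and ChannelShuffle are standard albumentations transforms, and the keymap shown matches the default 'img' -> 'image' renaming.

albu_transforms = [
    dict(type='RandomBrightnessContrast',
         brightness_limit=[0.1, 0.3],
         contrast_limit=[0.1, 0.3],
         p=0.2),
    dict(type='ChannelShuffle', p=0.1),
]

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', size=224),
    # 'img' is renamed to the 'image' key expected by albumentations
    # and mapped back to 'img' afterwards.
    dict(type='Albu', transforms=albu_transforms, keymap={'img': 'image'}),
    dict(type='Normalize',
         mean=[123.675, 116.28, 103.53],
         std=[58.395, 57.12, 57.375],
         to_rgb=True),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label']),
]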
+from .distributed_sampler import DistributedSampler + +__all__ = ['DistributedSampler'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/distributed_sampler.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/distributed_sampler.py new file mode 100644 index 0000000000..4d10f7f2a0 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/samplers/distributed_sampler.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch.utils.data import DistributedSampler as _DistributedSampler + + +class DistributedSampler(_DistributedSampler): + + def __init__(self, + dataset, + num_replicas=None, + rank=None, + shuffle=True, + round_up=True): + super().__init__(dataset, num_replicas=num_replicas, rank=rank) + self.shuffle = shuffle + self.round_up = round_up + if self.round_up: + self.total_size = self.num_samples * self.num_replicas + else: + self.total_size = len(self.dataset) + + def __iter__(self): + # deterministically shuffle based on epoch + if self.shuffle: + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + if self.round_up: + indices = ( + indices * + int(self.total_size / len(indices) + 1))[:self.total_size] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + if self.round_up: + assert len(indices) == self.num_samples + + return iter(indices) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/utils.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/utils.py new file mode 100644 index 0000000000..75070bc064 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/utils.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import gzip +import hashlib +import os +import os.path +import shutil +import tarfile +import urllib.error +import urllib.request +import zipfile + +__all__ = ['rm_suffix', 'check_integrity', 'download_and_extract_archive'] + + +def rm_suffix(s, suffix=None): + if suffix is None: + return s[:s.rfind('.')] + else: + return s[:s.rfind(suffix)] + + +def calculate_md5(fpath, chunk_size=1024 * 1024): + md5 = hashlib.md5() + with open(fpath, 'rb') as f: + for chunk in iter(lambda: f.read(chunk_size), b''): + md5.update(chunk) + return md5.hexdigest() + + +def check_md5(fpath, md5, **kwargs): + return md5 == calculate_md5(fpath, **kwargs) + + +def check_integrity(fpath, md5=None): + if not os.path.isfile(fpath): + return False + if md5 is None: + return True + return check_md5(fpath, md5) + + +def download_url_to_file(url, fpath): + with urllib.request.urlopen(url) as resp, open(fpath, 'wb') as of: + shutil.copyfileobj(resp, of) + + +def download_url(url, root, filename=None, md5=None): + """Download a file from a url and place it in root. + + Args: + url (str): URL to download file from. + root (str): Directory to place downloaded file in. + filename (str | None): Name to save the file under. + If filename is None, use the basename of the URL. + md5 (str | None): MD5 checksum of the download. + If md5 is None, download without md5 check. 
+ """ + root = os.path.expanduser(root) + if not filename: + filename = os.path.basename(url) + fpath = os.path.join(root, filename) + + os.makedirs(root, exist_ok=True) + + if check_integrity(fpath, md5): + print(f'Using downloaded and verified file: {fpath}') + else: + try: + print(f'Downloading {url} to {fpath}') + download_url_to_file(url, fpath) + except (urllib.error.URLError, IOError) as e: + if url[:5] == 'https': + url = url.replace('https:', 'http:') + print('Failed download. Trying https -> http instead.' + f' Downloading {url} to {fpath}') + download_url_to_file(url, fpath) + else: + raise e + # check integrity of downloaded file + if not check_integrity(fpath, md5): + raise RuntimeError('File not found or corrupted.') + + +def _is_tarxz(filename): + return filename.endswith('.tar.xz') + + +def _is_tar(filename): + return filename.endswith('.tar') + + +def _is_targz(filename): + return filename.endswith('.tar.gz') + + +def _is_tgz(filename): + return filename.endswith('.tgz') + + +def _is_gzip(filename): + return filename.endswith('.gz') and not filename.endswith('.tar.gz') + + +def _is_zip(filename): + return filename.endswith('.zip') + + +def extract_archive(from_path, to_path=None, remove_finished=False): + if to_path is None: + to_path = os.path.dirname(from_path) + + if _is_tar(from_path): + with tarfile.open(from_path, 'r') as tar: + tar.extractall(path=to_path) + elif _is_targz(from_path) or _is_tgz(from_path): + with tarfile.open(from_path, 'r:gz') as tar: + tar.extractall(path=to_path) + elif _is_tarxz(from_path): + with tarfile.open(from_path, 'r:xz') as tar: + tar.extractall(path=to_path) + elif _is_gzip(from_path): + to_path = os.path.join( + to_path, + os.path.splitext(os.path.basename(from_path))[0]) + with open(to_path, 'wb') as out_f, gzip.GzipFile(from_path) as zip_f: + out_f.write(zip_f.read()) + elif _is_zip(from_path): + with zipfile.ZipFile(from_path, 'r') as z: + z.extractall(to_path) + else: + raise ValueError(f'Extraction of {from_path} not supported') + + if remove_finished: + os.remove(from_path) + + +def download_and_extract_archive(url, + download_root, + extract_root=None, + filename=None, + md5=None, + remove_finished=False): + download_root = os.path.expanduser(download_root) + if extract_root is None: + extract_root = download_root + if not filename: + filename = os.path.basename(url) + + download_url(url, download_root, filename, md5) + + archive = os.path.join(download_root, filename) + print(f'Extracting {archive} to {extract_root}') + extract_archive(archive, extract_root, remove_finished) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/voc.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/voc.py new file mode 100644 index 0000000000..be8c0a05a5 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/datasets/voc.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
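A small usage sketch for the download helpers above. The URL and checksum below are placeholders rather than a real dataset location, and the import path assumes the mmcls package layout introduced by this patch.

import os

from mmcls.datasets.utils import check_integrity, download_and_extract_archive

url = 'https://example.com/datasets/toy_dataset.tar.gz'  # placeholder URL
data_root = './data/toy_dataset'
archive_md5 = None  # set to the published checksum to enable verification

# Downloads the archive into data_root, verifies its MD5 when one is given and
# unpacks it in place; pass remove_finished=True to delete the archive after.
download_and_extract_archive(
    url, download_root=data_root, md5=archive_md5, remove_finished=False)

# check_integrity can be reused later, e.g. to decide whether to re-download.
archive_path = os.path.join(data_root, 'toy_dataset.tar.gz')
print(check_integrity(archive_path, md5=archive_md5))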
+import os.path as osp +import xml.etree.ElementTree as ET + +import mmcv +import numpy as np + +from .builder import DATASETS +from .multi_label import MultiLabelDataset + + +@DATASETS.register_module() +class VOC(MultiLabelDataset): + """`Pascal VOC `_ Dataset.""" + + CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') + + def __init__(self, **kwargs): + super(VOC, self).__init__(**kwargs) + if 'VOC2007' in self.data_prefix: + self.year = 2007 + else: + raise ValueError('Cannot infer dataset year from img_prefix.') + + def load_annotations(self): + """Load annotations. + + Returns: + list[dict]: Annotation info from XML file. + """ + data_infos = [] + img_ids = mmcv.list_from_file(self.ann_file) + for img_id in img_ids: + filename = f'JPEGImages/{img_id}.jpg' + xml_path = osp.join(self.data_prefix, 'Annotations', + f'{img_id}.xml') + tree = ET.parse(xml_path) + root = tree.getroot() + labels = [] + labels_difficult = [] + for obj in root.findall('object'): + label_name = obj.find('name').text + # in case customized dataset has wrong labels + # or CLASSES has been override. + if label_name not in self.CLASSES: + continue + label = self.class_to_idx[label_name] + difficult = int(obj.find('difficult').text) + if difficult: + labels_difficult.append(label) + else: + labels.append(label) + + gt_label = np.zeros(len(self.CLASSES)) + # The order cannot be swapped for the case where multiple objects + # of the same kind exist and some are difficult. + gt_label[labels_difficult] = -1 + gt_label[labels] = 1 + + info = dict( + img_prefix=self.data_prefix, + img_info=dict(filename=filename), + gt_label=gt_label.astype(np.int8)) + data_infos.append(info) + + return data_infos diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/__init__.py new file mode 100644 index 0000000000..b501833eae --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .backbones import * # noqa: F401,F403 +from .builder import (BACKBONES, CLASSIFIERS, HEADS, LOSSES, NECKS, + build_backbone, build_classifier, build_head, build_loss, + build_neck) +from .classifiers import * # noqa: F401,F403 +from .heads import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 + +__all__ = [ + 'BACKBONES', 'HEADS', 'NECKS', 'LOSSES', 'CLASSIFIERS', 'build_backbone', + 'build_head', 'build_neck', 'build_loss', 'build_classifier' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/__init__.py new file mode 100644 index 0000000000..3be2e92426 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
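To make the multi-hot encoding in VOC.load_annotations above concrete, a small NumPy sketch with illustrative class indices (11 = 'dog', 14 = 'person', 9 = 'cow' in the CLASSES tuple). Difficult-only classes end up as -1, which downstream evaluation typically treats as an ignore label, and the assignment order lets a normal instance of a class override a difficult one.

import numpy as np

num_classes = 20
labels = [11, 14]           # normal objects: 'dog' and 'person'
labels_difficult = [14, 9]  # 'person' also has a difficult box; 'cow' is difficult only

gt_label = np.zeros(num_classes)
gt_label[labels_difficult] = -1  # mark difficult classes first ...
gt_label[labels] = 1             # ... so co-occurring normal instances win
gt_label = gt_label.astype(np.int8)

print(gt_label[14])  # 1  -> 'person' stays positive despite a difficult box
print(gt_label[9])   # -1 -> 'cow' is flagged as difficult only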
+from .alexnet import AlexNet +from .lenet import LeNet5 +from .mobilenet_v2 import MobileNetV2 +from .mobilenet_v3 import MobileNetV3 +from .regnet import RegNet +from .repvgg import RepVGG +from .res2net import Res2Net +from .resnest import ResNeSt +from .resnet import ResNet, ResNetV1d +from .resnet_cifar import ResNet_CIFAR +from .resnext import ResNeXt +from .seresnet import SEResNet +from .seresnext import SEResNeXt +from .shufflenet_v1 import ShuffleNetV1 +from .shufflenet_v2 import ShuffleNetV2 +from .swin_transformer import SwinTransformer +from .t2t_vit import T2T_ViT +from .timm_backbone import TIMMBackbone +from .tnt import TNT +from .vgg import VGG +from .vision_transformer import VisionTransformer + +__all__ = [ + 'LeNet5', 'AlexNet', 'VGG', 'RegNet', 'ResNet', 'ResNeXt', 'ResNetV1d', + 'ResNeSt', 'ResNet_CIFAR', 'SEResNet', 'SEResNeXt', 'ShuffleNetV1', + 'ShuffleNetV2', 'MobileNetV2', 'MobileNetV3', 'VisionTransformer', + 'SwinTransformer', 'TNT', 'TIMMBackbone', 'T2T_ViT', 'Res2Net', 'RepVGG' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/alexnet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/alexnet.py new file mode 100644 index 0000000000..1b74dc70aa --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/alexnet.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class AlexNet(BaseBackbone): + """`AlexNet `_ backbone. + + The input for AlexNet is a 224x224 RGB image. + + Args: + num_classes (int): number of classes for classification. + The default value is -1, which uses the backbone as + a feature extractor without the top classifier. + """ + + def __init__(self, num_classes=-1): + super(AlexNet, self).__init__() + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + + x = self.features(x) + if self.num_classes > 0: + x = x.view(x.size(0), 256 * 6 * 6) + x = self.classifier(x) + + return (x, ) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/base_backbone.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/base_backbone.py new file mode 100644 index 0000000000..c1050fab1c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/base_backbone.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +from mmcv.runner import BaseModule + + +class BaseBackbone(BaseModule, metaclass=ABCMeta): + """Base backbone. 
+ + This class defines the basic functions of a backbone. Any backbone that + inherits this class should at least define its own `forward` function. + """ + + def __init__(self, init_cfg=None): + super(BaseBackbone, self).__init__(init_cfg) + + @abstractmethod + def forward(self, x): + """Forward computation. + + Args: + x (tensor | tuple[tensor]): x could be a Torch.tensor or a tuple of + Torch.tensor, containing input data for forward computation. + """ + pass + + def train(self, mode=True): + """Set module status before forward computation. + + Args: + mode (bool): Whether it is train_mode or test_mode + """ + super(BaseBackbone, self).train(mode) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/lenet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/lenet.py new file mode 100644 index 0000000000..11686619eb --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/lenet.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class LeNet5(BaseBackbone): + """`LeNet5 `_ backbone. + + The input for LeNet-5 is a 32×32 grayscale image. + + Args: + num_classes (int): number of classes for classification. + The default value is -1, which uses the backbone as + a feature extractor without the top classifier. + """ + + def __init__(self, num_classes=-1): + super(LeNet5, self).__init__() + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d(1, 6, kernel_size=5, stride=1), nn.Tanh(), + nn.AvgPool2d(kernel_size=2), + nn.Conv2d(6, 16, kernel_size=5, stride=1), nn.Tanh(), + nn.AvgPool2d(kernel_size=2), + nn.Conv2d(16, 120, kernel_size=5, stride=1), nn.Tanh()) + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(120, 84), + nn.Tanh(), + nn.Linear(84, num_classes), + ) + + def forward(self, x): + + x = self.features(x) + if self.num_classes > 0: + x = self.classifier(x.squeeze()) + + return (x, ) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v2.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v2.py new file mode 100644 index 0000000000..8f171eda79 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v2.py @@ -0,0 +1,264 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import make_divisible +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class InvertedResidual(BaseModule): + """InvertedResidual block for MobileNetV2. + + Args: + in_channels (int): The input channels of the InvertedResidual block. + out_channels (int): The output channels of the InvertedResidual block. + stride (int): Stride of the middle (first) 3x3 convolution. + expand_ratio (int): adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). 
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.stride = stride + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' + self.with_cp = with_cp + self.use_res_connect = self.stride == 1 and in_channels == out_channels + hidden_dim = int(round(in_channels * expand_ratio)) + + layers = [] + if expand_ratio != 1: + layers.append( + ConvModule( + in_channels=in_channels, + out_channels=hidden_dim, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + layers.extend([ + ConvModule( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=3, + stride=stride, + padding=1, + groups=hidden_dim, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=hidden_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + + def _inner_forward(x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@BACKBONES.register_module() +class MobileNetV2(BaseBackbone): + """MobileNetV2 backbone. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + out_indices (None or Sequence[int]): Output from which stages. + Default: (7, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + # Parameters to build layers. 4 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks, stride. + arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], + [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], + [6, 320, 1, 1]] + + def __init__(self, + widen_factor=1., + out_indices=(7, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(MobileNetV2, self).__init__(init_cfg) + self.widen_factor = widen_factor + self.out_indices = out_indices + for index in out_indices: + if index not in range(0, 8): + raise ValueError('the item in out_indices must in ' + f'range(0, 8). But received {index}') + + if frozen_stages not in range(-1, 8): + raise ValueError('frozen_stages must be in range(-1, 8). 
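# Illustrative sketch (not part of the patch): a plain-PyTorch version of the
# inverted residual defined above, with Conv2d/BatchNorm2d/ReLU6 standing in for
# mmcv's ConvModule. It shows the expand -> depthwise -> linear-projection
# pattern and the rule that the skip connection is only used when stride == 1
# and in_channels == out_channels; here the residual add is done by hand.
import torch
import torch.nn as nn

def inverted_residual(in_ch, out_ch, stride, expand_ratio):
    hidden = int(round(in_ch * expand_ratio))
    layers = []
    if expand_ratio != 1:  # 1x1 expansion
        layers += [nn.Conv2d(in_ch, hidden, 1, bias=False),
                   nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True)]
    layers += [  # 3x3 depthwise conv, then 1x1 linear projection (no activation)
        nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
        nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
        nn.Conv2d(hidden, out_ch, 1, bias=False), nn.BatchNorm2d(out_ch)]
    return nn.Sequential(*layers)

block = inverted_residual(24, 24, stride=1, expand_ratio=6)
x = torch.rand(1, 24, 56, 56)
y = x + block(x)          # residual path applies: stride == 1 and 24 == 24
print(y.shape)            # torch.Size([1, 24, 56, 56])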
' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks, stride = layer_cfg + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + if widen_factor > 1.0: + self.out_channel = int(1280 * widen_factor) + else: + self.out_channel = 1280 + + layer = ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channel, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.add_module('conv2', layer) + self.layers.append('conv2') + + def make_layer(self, out_channels, num_blocks, stride, expand_ratio): + """Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. Default: 6. + """ + layers = [] + for i in range(num_blocks): + if i >= 1: + stride = 1 + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + stride, + expand_ratio=expand_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v3.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v3.py new file mode 100644 index 0000000000..b612b88781 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/mobilenet_v3.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import ConvModule +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import InvertedResidual +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class MobileNetV3(BaseBackbone): + """MobileNetV3 backbone. 
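# Illustrative sketch (not part of the patch): the per-stage channel arithmetic
# of MobileNetV2.__init__ above for a reduced width multiplier. The exact
# rounding rule lives in mmcls.models.utils.make_divisible; the helper below is
# a common equivalent (assumed here) that rounds to the nearest multiple of
# `divisor` while never dropping more than ~10% below the requested width.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                 [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
                 [6, 320, 1, 1]]

def make_divisible(value, divisor=8, min_value=None, min_ratio=0.9):
    min_value = min_value or divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < min_ratio * value:   # keep at least 90% of the requested width
        new_value += divisor
    return new_value

widen_factor = 0.5
stem = make_divisible(32 * widen_factor, 8)
stages = [make_divisible(c * widen_factor, 8) for _, c, _, _ in arch_settings]
print(stem, stages)   # 16 [8, 16, 16, 32, 48, 80, 160]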
+ + Args: + arch (str): Architecture of mobilnetv3, from {small, large}. + Default: small. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + out_indices (None or Sequence[int]): Output from which stages. + Default: None, which means output tensors from final stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. + """ + # Parameters to build each block: + # [kernel size, mid channels, out channels, with_se, act type, stride] + arch_settings = { + 'small': [[3, 16, 16, True, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 2], + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 40, True, 'HSwish', 2], + [5, 240, 40, True, 'HSwish', 1], + [5, 240, 40, True, 'HSwish', 1], + [5, 120, 48, True, 'HSwish', 1], + [5, 144, 48, True, 'HSwish', 1], + [5, 288, 96, True, 'HSwish', 2], + [5, 576, 96, True, 'HSwish', 1], + [5, 576, 96, True, 'HSwish', 1]], + 'large': [[3, 16, 16, False, 'ReLU', 1], + [3, 64, 24, False, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 1], + [5, 72, 40, True, 'ReLU', 2], + [5, 120, 40, True, 'ReLU', 1], + [5, 120, 40, True, 'ReLU', 1], + [3, 240, 80, False, 'HSwish', 2], + [3, 200, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 480, 112, True, 'HSwish', 1], + [3, 672, 112, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 2], + [5, 960, 160, True, 'HSwish', 1], + [5, 960, 160, True, 'HSwish', 1]] + } # yapf: disable + + def __init__(self, + arch='small', + conv_cfg=None, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.01), + out_indices=None, + frozen_stages=-1, + norm_eval=False, + with_cp=False, + init_cfg=[ + dict( + type='Kaiming', + layer=['Conv2d'], + nonlinearity='leaky_relu'), + dict(type='Normal', layer=['Linear'], std=0.01), + dict(type='Constant', layer=['BatchNorm2d'], val=1) + ]): + super(MobileNetV3, self).__init__(init_cfg) + assert arch in self.arch_settings + if out_indices is None: + out_indices = (12, ) if arch == 'small' else (16, ) + for order, index in enumerate(out_indices): + if index not in range(0, len(self.arch_settings[arch]) + 2): + raise ValueError( + 'the item in out_indices must in ' + f'range(0, {len(self.arch_settings[arch]) + 2}). ' + f'But received {index}') + + if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2): + raise ValueError('frozen_stages must be in range(-1, ' + f'{len(self.arch_settings[arch]) + 2}). 
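# Illustrative sketch (not part of the patch): how many feature levels the
# MobileNetV3 backbone above exposes. There is layer0 (the stem conv), one layer
# per row of `arch_settings[arch]`, and a final 1x1 conv, hence the
# `len(self.arch_settings[arch]) + 2` bound checked in __init__ and the default
# out_indices (12,) / (16,) that select the last conv layer.
num_block_rows = {'small': 11, 'large': 15}    # row counts of the tables above
for arch, rows in num_block_rows.items():
    num_layers = rows + 2                      # stem + blocks + final conv
    print(f'{arch}: out_indices must lie in range(0, {num_layers}), '
          f'default out_indices = ({num_layers - 1},)')
# small: out_indices must lie in range(0, 13), default out_indices = (12,)
# large: out_indices must lie in range(0, 17), default out_indices = (16,)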
' + f'But received {frozen_stages}') + self.arch = arch + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.layers = self._make_layer() + self.feat_dim = self.arch_settings[arch][-1][1] + + def _make_layer(self): + layers = [] + layer_setting = self.arch_settings[self.arch] + in_channels = 16 + + layer = ConvModule( + in_channels=3, + out_channels=in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + self.add_module('layer0', layer) + layers.append('layer0') + + for i, params in enumerate(layer_setting): + (kernel_size, mid_channels, out_channels, with_se, act, + stride) = params + if with_se: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict( + type='HSigmoid', + bias=3, + divisor=6, + min_value=0, + max_value=1))) + else: + se_cfg = None + + layer = InvertedResidual( + in_channels=in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=act), + with_cp=self.with_cp) + in_channels = out_channels + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + # Build the last layer before pooling + # TODO: No dilation + layer = ConvModule( + in_channels=in_channels, + out_channels=576 if self.arch == 'small' else 960, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + layer_name = 'layer{}'.format(len(layer_setting) + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + return layers + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(0, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV3, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/regnet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/regnet.py new file mode 100644 index 0000000000..1dce86aa63 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/regnet.py @@ -0,0 +1,312 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import ResNet +from .resnext import Bottleneck + + +@BACKBONES.register_module() +class RegNet(ResNet): + """RegNet backbone. + + More details can be found in `paper `_ . + + Args: + arch (dict): The parameter of RegNets. + - w0 (int): initial width + - wa (float): slope of width + - wm (float): quantization parameter to quantize the width + - depth (int): depth of the backbone + - group_w (int): width of group + - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. 
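# Illustrative sketch (not part of the patch): the SE branches built above gate
# their features with mmcv's HSigmoid configured as bias=3, divisor=6 and
# clamped to [0, 1]. With that configuration the gate is effectively
# hsigmoid(x) = clamp((x + 3) / 6, 0, 1) (assumed from mmcv's HSigmoid
# definition); a quick numeric check:
import torch

def hsigmoid(x, bias=3.0, divisor=6.0, min_value=0.0, max_value=1.0):
    return ((x + bias) / divisor).clamp(min_value, max_value)

x = torch.tensor([-4.0, -3.0, 0.0, 3.0, 6.0])
print(hsigmoid(x))   # tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000])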
+ strides (Sequence[int]): Strides of the first block of each stage. + base_channels (int): Base channels after stem layer. + in_channels (int): Number of input image channels. Default: 3. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: "pytorch". + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Default: -1. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmcls.models import RegNet + >>> import torch + >>> self = RegNet( + arch=dict( + w0=88, + wa=26.31, + wm=2.25, + group_w=48, + depth=25, + bot_mul=1.0)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 96, 8, 8) + (1, 192, 4, 4) + (1, 432, 2, 2) + (1, 1008, 1, 1) + """ + arch_settings = { + 'regnetx_400mf': + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + 'regnetx_800mf': + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), + 'regnetx_1.6gf': + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), + 'regnetx_3.2gf': + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), + 'regnetx_4.0gf': + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), + 'regnetx_6.4gf': + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), + 'regnetx_8.0gf': + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), + 'regnetx_12gf': + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), + } + + def __init__(self, + arch, + in_channels=3, + stem_channels=32, + base_channels=32, + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=None): + super(ResNet, self).__init__(init_cfg) + + # Generate RegNet parameters first + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'"arch": "{arch}" is not one of the' \ + ' arch_settings' + arch = self.arch_settings[arch] + elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + widths, num_stages = self.generate_regnet( + arch['w0'], + arch['wa'], + arch['wm'], + arch['depth'], + ) + # Convert to per stage format + stage_widths, stage_blocks = self.get_stages_from_blocks(widths) + # Generate group widths and bot muls + group_widths = [arch['group_w'] for _ in range(num_stages)] + self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] + # Adjust the compatibility of stage_widths and group_widths + stage_widths, group_widths = 
self.adjust_width_group( + stage_widths, self.bottleneck_ratio, group_widths) + + # Group params by stage + self.stage_widths = stage_widths + self.group_widths = group_widths + self.depth = sum(stage_blocks) + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + if self.deep_stem: + raise NotImplementedError( + 'deep_stem has not been implemented for RegNet') + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.stage_blocks = stage_blocks[:num_stages] + + self._make_stem_layer(in_channels, stem_channels) + + _in_channels = stem_channels + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + group_width = self.group_widths[i] + width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) + stage_groups = width // group_width + + res_layer = self.make_res_layer( + block=Bottleneck, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=self.stage_widths[i], + expansion=1, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + base_channels=self.stage_widths[i], + groups=stage_groups, + width_per_group=group_width) + _in_channels = self.stage_widths[i] + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = stage_widths[-1] + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def generate_regnet(self, + initial_width, + width_slope, + width_parameter, + depth, + divisor=8): + """Generates per block width from RegNet parameters. + + Args: + initial_width ([int]): Initial width of the backbone + width_slope ([float]): Slope of the quantized linear function + width_parameter ([int]): Parameter used to quantize the width. + depth ([int]): Depth of the backbone. + divisor (int): The divisor of channels. Defaults to 8. + + Returns: + tuple: tuple containing: + - list: Widths of each stage. + - int: The number of stages. + """ + assert width_slope >= 0 + assert initial_width > 0 + assert width_parameter > 1 + assert initial_width % divisor == 0 + widths_cont = np.arange(depth) * width_slope + initial_width + ks = np.round( + np.log(widths_cont / initial_width) / np.log(width_parameter)) + widths = initial_width * np.power(width_parameter, ks) + widths = np.round(np.divide(widths, divisor)) * divisor + num_stages = len(np.unique(widths)) + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages + + @staticmethod + def quantize_float(number, divisor): + """Converts a float to closest non-zero int divisible by divior. 
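# Illustrative sketch (not part of the patch): the width schedule computed by
# generate_regnet above, run for the regnetx_3.2gf parameters used in the class
# docstring example (w0=88, wa=26.31, wm=2.25, depth=25). Per-block widths are
# snapped to a quantized log scale, so equal widths group into stages; this
# setting produces 4 stages, matching the 4 level outputs in the docstring.
import numpy as np

w0, wa, wm, depth, divisor = 88, 26.31, 2.25, 25, 8
widths_cont = np.arange(depth) * wa + w0
ks = np.round(np.log(widths_cont / w0) / np.log(wm))
widths = np.round(w0 * np.power(wm, ks) / divisor) * divisor

print(sorted(set(widths.astype(int).tolist())))   # per-stage widths before group adjustment
print('num_stages:', len(np.unique(widths)))      # num_stages: 4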
+ + Args: + number (int): Original number to be quantized. + divisor (int): Divisor used to quantize the number. + + Returns: + int: quantized number that is divisible by devisor. + """ + return int(round(number / divisor) * divisor) + + def adjust_width_group(self, widths, bottleneck_ratio, groups): + """Adjusts the compatibility of widths and groups. + + Args: + widths (list[int]): Width of each stage. + bottleneck_ratio (float): Bottleneck ratio. + groups (int): number of groups in each stage + + Returns: + tuple(list): The adjusted widths and groups of each stage. + """ + bottleneck_width = [ + int(w * b) for w, b in zip(widths, bottleneck_ratio) + ] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] + bottleneck_width = [ + self.quantize_float(w_bot, g) + for w_bot, g in zip(bottleneck_width, groups) + ] + widths = [ + int(w_bot / b) + for w_bot, b in zip(bottleneck_width, bottleneck_ratio) + ] + return widths, groups + + def get_stages_from_blocks(self, widths): + """Gets widths/stage_blocks of network at each stage. + + Args: + widths (list[int]): Width in each stage. + + Returns: + tuple(list): width and depth of each stage + """ + width_diff = [ + width != width_prev + for width, width_prev in zip(widths + [0], [0] + widths) + ] + stage_widths = [ + width for width, diff in zip(widths, width_diff[:-1]) if diff + ] + stage_blocks = np.diff([ + depth for depth, diff in zip(range(len(width_diff)), width_diff) + if diff + ]).tolist() + return stage_widths, stage_blocks + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/repvgg.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/repvgg.py new file mode 100644 index 0000000000..b257a12b14 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/repvgg.py @@ -0,0 +1,537 @@ +import torch +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer +from mmcv.runner import BaseModule, Sequential +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES +from ..utils.se_layer import SELayer +from .base_backbone import BaseBackbone + + +class RepVGGBlock(BaseModule): + """RepVGG block for RepVGG backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 and 1x1 convolution layer. Default: 1. + padding (int): Padding of the 3x3 convolution layer. + dilation (int): Dilation of the 3x3 convolution layer. + groups (int): Groups of the 3x3 and 1x1 convolution layer. Default: 1. + padding_mode (str): Padding mode of the 3x3 convolution layer. + Default: 'zeros'. + se_cfg (None or dict): The configuration of the se module. + Default: None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). 
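# Illustrative sketch (not part of the patch): the group-width adjustment done
# by adjust_width_group / quantize_float above. With bottleneck ratio 1.0 and a
# group width of 48 (the regnetx_3.2gf setting), each stage width is rounded to
# the nearest multiple of 48, which is how the class docstring example arrives
# at per-stage channels that are all divisible by 48 (96, 192, 432, 1008).
def quantize_float(number, divisor):
    return int(round(number / divisor) * divisor)

group_w, bot_mul = 48, 1.0
# stage widths as produced by generate_regnet for regnetx_3.2gf (assumed here)
stage_widths = [88, 200, 448, 1000]

bottleneck_width = [int(w * bot_mul) for w in stage_widths]
groups = [min(group_w, w_bot) for w_bot in bottleneck_width]
adjusted = [int(quantize_float(w_bot, g) / bot_mul)
            for w_bot, g in zip(bottleneck_width, groups)]
print(adjusted)   # [96, 192, 432, 1008]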
+ act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + deploy (bool): Whether to switch the model structure to + deployment mode. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + padding=1, + dilation=1, + groups=1, + padding_mode='zeros', + se_cfg=None, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + deploy=False, + init_cfg=None): + super(RepVGGBlock, self).__init__(init_cfg) + + assert se_cfg is None or isinstance(se_cfg, dict) + + self.in_channels = in_channels + self.out_channels = out_channels + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.se_cfg = se_cfg + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.deploy = deploy + + if deploy: + self.branch_reparam = build_conv_layer( + conv_cfg, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=True, + padding_mode=padding_mode) + else: + # judge if input shape and output shape are the same. + # If true, add a normalized identity shortcut. + if out_channels == in_channels and stride == 1 and \ + padding == dilation: + self.branch_norm = build_norm_layer(norm_cfg, in_channels)[1] + else: + self.branch_norm = None + + self.branch_3x3 = self.create_conv_bn( + kernel_size=3, + dilation=dilation, + padding=padding, + ) + self.branch_1x1 = self.create_conv_bn(kernel_size=1) + + if se_cfg is not None: + self.se_layer = SELayer(channels=out_channels, **se_cfg) + else: + self.se_layer = None + + self.act = build_activation_layer(act_cfg) + + def create_conv_bn(self, kernel_size, dilation=1, padding=0): + conv_bn = Sequential() + conv_bn.add_module( + 'conv', + build_conv_layer( + self.conv_cfg, + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=kernel_size, + stride=self.stride, + dilation=dilation, + padding=padding, + groups=self.groups, + bias=False)) + conv_bn.add_module( + 'norm', + build_norm_layer(self.norm_cfg, num_features=self.out_channels)[1]) + + return conv_bn + + def forward(self, x): + + def _inner_forward(inputs): + if self.deploy: + return self.branch_reparam(inputs) + + if self.branch_norm is None: + branch_norm_out = 0 + else: + branch_norm_out = self.branch_norm(inputs) + + inner_out = self.branch_3x3(inputs) + self.branch_1x1( + inputs) + branch_norm_out + + if self.se_cfg is not None: + inner_out = self.se_layer(inner_out) + + return inner_out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.act(out) + + return out + + def switch_to_deploy(self): + """Switch the model structure from training mode to deployment mode.""" + if self.deploy: + return + assert self.norm_cfg['type'] == 'BN', \ + "Switch is not allowed when norm_cfg['type'] != 'BN'." 
+ + reparam_weight, reparam_bias = self.reparameterize() + self.branch_reparam = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.out_channels, + kernel_size=3, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + groups=self.groups, + bias=True) + self.branch_reparam.weight.data = reparam_weight + self.branch_reparam.bias.data = reparam_bias + + for param in self.parameters(): + param.detach_() + delattr(self, 'branch_3x3') + delattr(self, 'branch_1x1') + delattr(self, 'branch_norm') + + self.deploy = True + + def reparameterize(self): + """Fuse all the parameters of all branches. + + Returns: + tuple[torch.Tensor, torch.Tensor]: Parameters after fusion of all + branches. the first element is the weights and the second is + the bias. + """ + weight_3x3, bias_3x3 = self._fuse_conv_bn(self.branch_3x3) + weight_1x1, bias_1x1 = self._fuse_conv_bn(self.branch_1x1) + # pad a conv1x1 weight to a conv3x3 weight + weight_1x1 = F.pad(weight_1x1, [1, 1, 1, 1], value=0) + + weight_norm, bias_norm = 0, 0 + if self.branch_norm: + tmp_conv_bn = self._norm_to_conv3x3(self.branch_norm) + weight_norm, bias_norm = self._fuse_conv_bn(tmp_conv_bn) + + return (weight_3x3 + weight_1x1 + weight_norm, + bias_3x3 + bias_1x1 + bias_norm) + + def _fuse_conv_bn(self, branch): + """Fuse the parameters in a branch with a conv and bn. + + Args: + branch (mmcv.runner.Sequential): A branch with conv and bn. + + Returns: + tuple[torch.Tensor, torch.Tensor]: The parameters obtained after + fusing the parameters of conv and bn in one branch. + The first element is the weight and the second is the bias. + """ + if branch is None: + return 0, 0 + conv_weight = branch.conv.weight + running_mean = branch.norm.running_mean + running_var = branch.norm.running_var + gamma = branch.norm.weight + beta = branch.norm.bias + eps = branch.norm.eps + + std = (running_var + eps).sqrt() + fused_weight = (gamma / std).reshape(-1, 1, 1, 1) * conv_weight + fused_bias = -running_mean * gamma / std + beta + + return fused_weight, fused_bias + + def _norm_to_conv3x3(self, branch_nrom): + """Convert a norm layer to a conv3x3-bn sequence. + + Args: + branch (nn.BatchNorm2d): A branch only with bn in the block. + + Returns: + tmp_conv3x3 (mmcv.runner.Sequential): a sequential with conv3x3 and + bn. + """ + input_dim = self.in_channels // self.groups + conv_weight = torch.zeros((self.in_channels, input_dim, 3, 3), + dtype=branch_nrom.weight.dtype) + + for i in range(self.in_channels): + conv_weight[i, i % input_dim, 1, 1] = 1 + conv_weight = conv_weight.to(branch_nrom.weight.device) + + tmp_conv3x3 = self.create_conv_bn(kernel_size=3) + tmp_conv3x3.conv.weight.data = conv_weight + tmp_conv3x3.norm = branch_nrom + return tmp_conv3x3 + + +@BACKBONES.register_module() +class RepVGG(BaseBackbone): + """RepVGG backbone. + + A PyTorch impl of : `RepVGG: Making VGG-style ConvNets Great Again + `_ + + Args: + arch (str | dict): The parameter of RepVGG. + If it's a dict, it should contain the following keys: + + - num_blocks (Sequence[int]): Number of blocks in each stage. + - width_factor (Sequence[float]): Width deflator in each stage. + - group_layer_map (dict | None): RepVGG Block that declares + the need to apply group convolution. + - se_cfg (dict | None): Se Layer config + in_channels (int): Number of input image channels. Default: 3. + base_channels (int): Base channels of RepVGG backbone, work + with width_factor together. Default: 64. + out_indices (Sequence[int]): Output from which stages. Default: (3, ). 
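# Illustrative sketch (not part of the patch): a numeric check of the conv+BN
# fusion rule used by `_fuse_conv_bn` above. With the BN layer in eval mode,
#   W_fused = (gamma / sqrt(var + eps)) * W
#   b_fused = beta - mean * gamma / sqrt(var + eps)
# and the single fused conv reproduces conv -> BN up to floating-point error.
import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(8, 8, 3, padding=1, bias=False)
bn = nn.BatchNorm2d(8)
bn.eval()
# give the BN non-trivial statistics so the check is meaningful
bn.running_mean.uniform_(-1, 1)
bn.running_var.uniform_(0.5, 2.0)
bn.weight.data.uniform_(0.5, 1.5)
bn.bias.data.uniform_(-1, 1)

std = (bn.running_var + bn.eps).sqrt()
w_fused = (bn.weight / std).reshape(-1, 1, 1, 1) * conv.weight
b_fused = bn.bias - bn.running_mean * bn.weight / std

x = torch.rand(1, 8, 16, 16)
ref = bn(conv(x))
fused = F.conv2d(x, w_fused, b_fused, padding=1)
print(torch.allclose(ref, fused, atol=1e-5))   # True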
+ strides (Sequence[int]): Strides of the first block of each stage. + Default: (2, 2, 2, 2). + dilations (Sequence[int]): Dilation of each stage. + Default: (1, 1, 1, 1). + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + deploy (bool): Whether to switch the model structure to deployment + mode. Default: False. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + groupwise_layers = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26] + g2_layer_map = {layer: 2 for layer in groupwise_layers} + g4_layer_map = {layer: 4 for layer in groupwise_layers} + + arch_settings = { + 'A0': + dict( + num_blocks=[2, 4, 14, 1], + width_factor=[0.75, 0.75, 0.75, 2.5], + group_layer_map=None, + se_cfg=None), + 'A1': + dict( + num_blocks=[2, 4, 14, 1], + width_factor=[1, 1, 1, 2.5], + group_layer_map=None, + se_cfg=None), + 'A2': + dict( + num_blocks=[2, 4, 14, 1], + width_factor=[1.5, 1.5, 1.5, 2.75], + group_layer_map=None, + se_cfg=None), + 'B0': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[1, 1, 1, 2.5], + group_layer_map=None, + se_cfg=None), + 'B1': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2, 2, 2, 4], + group_layer_map=None, + se_cfg=None), + 'B1g2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2, 2, 2, 4], + group_layer_map=g2_layer_map, + se_cfg=None), + 'B1g4': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2, 2, 2, 4], + group_layer_map=g4_layer_map, + se_cfg=None), + 'B2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=None, + se_cfg=None), + 'B2g2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=g2_layer_map, + se_cfg=None), + 'B2g4': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=g4_layer_map, + se_cfg=None), + 'B3': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[3, 3, 3, 5], + group_layer_map=None, + se_cfg=None), + 'B3g2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[3, 3, 3, 5], + group_layer_map=g2_layer_map, + se_cfg=None), + 'B3g4': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[3, 3, 3, 5], + group_layer_map=g4_layer_map, + se_cfg=None), + 'D2se': + dict( + num_blocks=[8, 14, 24, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=None, + se_cfg=dict(ratio=16, divisor=1)) + } + + def __init__(self, + arch, + in_channels=3, + base_channels=64, + out_indices=(3, ), + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + deploy=False, + norm_eval=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(RepVGG, self).__init__(init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'"arch": "{arch}" is not one of the arch_settings' + arch = self.arch_settings[arch] + 
elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + assert len(arch['num_blocks']) == len( + arch['width_factor']) == len(strides) == len(dilations) + assert max(out_indices) < len(arch['num_blocks']) + if arch['group_layer_map'] is not None: + assert max(arch['group_layer_map'].keys()) <= sum( + arch['num_blocks']) + + if arch['se_cfg'] is not None: + assert isinstance(arch['se_cfg'], dict) + + self.arch = arch + self.in_channels = in_channels + self.base_channels = base_channels + self.out_indices = out_indices + self.strides = strides + self.dilations = dilations + self.deploy = deploy + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + + channels = min(64, int(base_channels * self.arch['width_factor'][0])) + self.stem = RepVGGBlock( + self.in_channels, + channels, + stride=2, + se_cfg=arch['se_cfg'], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + deploy=deploy) + + next_create_block_idx = 1 + self.stages = [] + for i in range(len(arch['num_blocks'])): + num_blocks = self.arch['num_blocks'][i] + stride = self.strides[i] + dilation = self.dilations[i] + out_channels = int(base_channels * 2**i * + self.arch['width_factor'][i]) + + stage, next_create_block_idx = self._make_stage( + channels, out_channels, num_blocks, stride, dilation, + next_create_block_idx, init_cfg) + stage_name = f'stage_{i + 1}' + self.add_module(stage_name, stage) + self.stages.append(stage_name) + + channels = out_channels + + def _make_stage(self, in_channels, out_channels, num_blocks, stride, + dilation, next_create_block_idx, init_cfg): + strides = [stride] + [1] * (num_blocks - 1) + dilations = [dilation] * num_blocks + + blocks = [] + for i in range(num_blocks): + groups = self.arch['group_layer_map'].get( + next_create_block_idx, + 1) if self.arch['group_layer_map'] is not None else 1 + blocks.append( + RepVGGBlock( + in_channels, + out_channels, + stride=strides[i], + padding=dilations[i], + dilation=dilations[i], + groups=groups, + se_cfg=self.arch['se_cfg'], + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + deploy=self.deploy, + init_cfg=init_cfg)) + in_channels = out_channels + next_create_block_idx += 1 + + return Sequential(*blocks), next_create_block_idx + + def forward(self, x): + x = self.stem(x) + outs = [] + for i, stage_name in enumerate(self.stages): + stage = getattr(self, stage_name) + x = stage(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages): + stage = getattr(self, f'stage_{i+1}') + stage.eval() + for param in stage.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(RepVGG, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def switch_to_deploy(self): + for m in self.modules(): + if isinstance(m, RepVGGBlock): + m.switch_to_deploy() + self.deploy = True diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/res2net.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/res2net.py new file mode 100644 index 
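# Illustrative sketch (not part of the patch): the channel arithmetic of
# RepVGG.__init__ above. The stem is capped at 64 channels, and stage i uses
# int(base_channels * 2**i * width_factor[i]) output channels.
width_factors = {            # width factors copied from the arch table above
    'A0': [0.75, 0.75, 0.75, 2.5],
    'B3': [3, 3, 3, 5],
}
base_channels = 64
for name, width_factor in width_factors.items():
    stem = min(64, int(base_channels * width_factor[0]))
    stages = [int(base_channels * 2**i * wf) for i, wf in enumerate(width_factor)]
    print(name, 'stem:', stem, 'stages:', stages)
# A0 stem: 48 stages: [48, 96, 192, 1280]
# B3 stem: 64 stages: [192, 384, 768, 2560]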
0000000000..491b6f4717 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/res2net.py @@ -0,0 +1,306 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner import ModuleList, Sequential + +from ..builder import BACKBONES +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet + + +class Bottle2neck(_Bottleneck): + expansion = 4 + + def __init__(self, + in_channels, + out_channels, + scales=4, + base_width=26, + base_channels=64, + stage_type='normal', + **kwargs): + """Bottle2neck block for Res2Net.""" + super(Bottle2neck, self).__init__(in_channels, out_channels, **kwargs) + assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.' + + mid_channels = out_channels // self.expansion + width = int(math.floor(mid_channels * (base_width / base_channels))) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width * scales, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + width * scales, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + + if stage_type == 'stage': + self.pool = nn.AvgPool2d( + kernel_size=3, stride=self.conv2_stride, padding=1) + + self.convs = ModuleList() + self.bns = ModuleList() + for i in range(scales - 1): + self.convs.append( + build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + bias=False)) + self.bns.append( + build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) + + self.conv3 = build_conv_layer( + self.conv_cfg, + width * scales, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.stage_type = stage_type + self.scales = scales + self.width = width + delattr(self, 'conv2') + delattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + sp = self.convs[0](spx[0].contiguous()) + sp = self.relu(self.bns[0](sp)) + out = sp + for i in range(1, self.scales - 1): + if self.stage_type == 'stage': + sp = spx[i] + else: + sp = sp + spx[i] + sp = self.convs[i](sp.contiguous()) + sp = self.relu(self.bns[i](sp)) + out = torch.cat((out, sp), 1) + + if self.stage_type == 'normal' and self.scales != 1: + out = torch.cat((out, spx[self.scales - 1]), 1) + elif self.stage_type == 'stage' and self.scales != 1: + out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Res2Layer(Sequential): + """Res2Layer to build Res2Net style backbone. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. 
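# Illustrative sketch (not part of the patch): the multi-scale wiring inside
# Bottle2neck.forward above (batch norm and ReLU are omitted here for brevity).
# The 1x1 output is split into `scales` groups of `width` channels; each group
# after the first is summed with the previous group's 3x3 output before its own
# 3x3 conv, and in 'normal' blocks the last group bypasses convolution entirely.
import torch
import torch.nn as nn

scales, width = 4, 8
convs = nn.ModuleList(
    nn.Conv2d(width, width, 3, padding=1, bias=False) for _ in range(scales - 1))

x = torch.rand(1, width * scales, 14, 14)
spx = torch.split(x, width, dim=1)          # 4 chunks of 8 channels each

sp = convs[0](spx[0])
out = sp
for i in range(1, scales - 1):
    sp = convs[i](sp + spx[i])              # reuse the previous scale's output
    out = torch.cat((out, sp), dim=1)
out = torch.cat((out, spx[-1]), dim=1)      # 'normal' stage: last split passes through
print(out.shape)                            # torch.Size([1, 32, 14, 14])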
Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottle2neck. Defaults to True. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + scales (int): Scales used in Res2Net. Default: 4 + base_width (int): Basic width of each scale. Default: 26 + """ + + def __init__(self, + block, + in_channels, + out_channels, + num_blocks, + stride=1, + avg_down=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + scales=4, + base_width=26, + **kwargs): + self.block = block + + downsample = None + if stride != 1 or in_channels != out_channels: + if avg_down: + downsample = nn.Sequential( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False), + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=1, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1], + ) + else: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1], + ) + + layers = [] + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + scales=scales, + base_width=base_width, + stage_type='stage', + **kwargs)) + in_channels = out_channels + for _ in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + scales=scales, + base_width=base_width, + **kwargs)) + super(Res2Layer, self).__init__(*layers) + + +@BACKBONES.register_module() +class Res2Net(ResNet): + """Res2Net backbone. + + A PyTorch implement of : `Res2Net: A New Multi-scale Backbone + Architecture `_ + + Args: + depth (int): Depth of Res2Net, choose from {50, 101, 152}. + scales (int): Scales used in Res2Net. Defaults to 4. + base_width (int): Basic width of each scale. Defaults to 26. + in_channels (int): Number of input image channels. Defaults to 3. + num_stages (int): Number of Res2Net stages. Defaults to 4. + strides (Sequence[int]): Strides of the first block of each stage. + Defaults to ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Defaults to ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. + Defaults to ``(3, )``. + style (str): "pytorch" or "caffe". If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Defaults to "pytorch". + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Defaults to True. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottle2neck. Defaults to True. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to ``dict(type='BN', requires_grad=True)``. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. 
+ zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Defaults to True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + + Example: + >>> from mmcls.models import Res2Net + >>> import torch + >>> model = Res2Net(depth=50, + ... scales=4, + ... base_width=26, + ... out_indices=(0, 1, 2, 3)) + >>> model.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = model.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 8, 8) + (1, 512, 4, 4) + (1, 1024, 2, 2) + (1, 2048, 1, 1) + """ + + arch_settings = { + 50: (Bottle2neck, (3, 4, 6, 3)), + 101: (Bottle2neck, (3, 4, 23, 3)), + 152: (Bottle2neck, (3, 8, 36, 3)) + } + + def __init__(self, + scales=4, + base_width=26, + style='pytorch', + deep_stem=True, + avg_down=True, + init_cfg=None, + **kwargs): + self.scales = scales + self.base_width = base_width + super(Res2Net, self).__init__( + style=style, + deep_stem=deep_stem, + avg_down=avg_down, + init_cfg=init_cfg, + **kwargs) + + def make_res_layer(self, **kwargs): + return Res2Layer( + scales=self.scales, + base_width=self.base_width, + base_channels=self.base_channels, + **kwargs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnest.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnest.py new file mode 100644 index 0000000000..0a82398871 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnest.py @@ -0,0 +1,339 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNetV1d + + +class RSoftmax(nn.Module): + """Radix Softmax module in ``SplitAttentionConv2d``. + + Args: + radix (int): Radix of input. + groups (int): Groups of input. + """ + + def __init__(self, radix, groups): + super().__init__() + self.radix = radix + self.groups = groups + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttentionConv2d(nn.Module): + """Split-Attention Conv2d. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int | tuple[int]): Same as nn.Conv2d. + stride (int | tuple[int]): Same as nn.Conv2d. + padding (int | tuple[int]): Same as nn.Conv2d. + dilation (int | tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. 
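# Illustrative sketch (not part of the patch): what RSoftmax above computes.
# For radix > 1, the (batch, radix * groups * channels) input is reshaped so
# that softmax runs over the radix dimension, i.e. for every group/channel slot
# the attention weights of the `radix` splits sum to 1.
import torch
import torch.nn.functional as F

radix, groups, channels = 2, 1, 4
x = torch.rand(3, radix * groups * channels)

batch = x.size(0)
y = x.view(batch, groups, radix, -1).transpose(1, 2)   # (batch, radix, groups, ...)
y = F.softmax(y, dim=1)                                 # softmax over the radix axis
y = y.reshape(batch, -1)

# the weights of the two radix splits of each slot sum to 1
print(y.reshape(batch, radix, -1).sum(dim=1))           # all ones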
+ """ + + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + radix=2, + reduction_factor=4, + conv_cfg=None, + norm_cfg=dict(type='BN')): + super(SplitAttentionConv2d, self).__init__() + inter_channels = max(in_channels * radix // reduction_factor, 32) + self.radix = radix + self.groups = groups + self.channels = channels + self.conv = build_conv_layer( + conv_cfg, + in_channels, + channels * radix, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups * radix, + bias=False) + self.norm0_name, norm0 = build_norm_layer( + norm_cfg, channels * radix, postfix=0) + self.add_module(self.norm0_name, norm0) + self.relu = nn.ReLU(inplace=True) + self.fc1 = build_conv_layer( + None, channels, inter_channels, 1, groups=self.groups) + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, inter_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.fc2 = build_conv_layer( + None, inter_channels, channels * radix, 1, groups=self.groups) + self.rsoftmax = RSoftmax(radix, groups) + + @property + def norm0(self): + return getattr(self, self.norm0_name) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def forward(self, x): + x = self.conv(x) + x = self.norm0(x) + x = self.relu(x) + + batch, rchannel = x.shape[:2] + if self.radix > 1: + splits = x.view(batch, self.radix, -1, *x.shape[2:]) + gap = splits.sum(dim=1) + else: + gap = x + gap = F.adaptive_avg_pool2d(gap, 1) + gap = self.fc1(gap) + + gap = self.norm1(gap) + gap = self.relu(gap) + + atten = self.fc2(gap) + atten = self.rsoftmax(atten).view(batch, -1, 1, 1) + + if self.radix > 1: + attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) + out = torch.sum(attens * splits, dim=1) + else: + out = atten * x + return out.contiguous() + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeSt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
+ """ + + def __init__(self, + in_channels, + out_channels, + groups=1, + width_per_group=4, + base_channels=64, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + super(Bottleneck, self).__init__(in_channels, out_channels, **kwargs) + + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. + if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = SplitAttentionConv2d( + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=1 if self.avg_down_stride else self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + radix=radix, + reduction_factor=reduction_factor, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + delattr(self, self.norm2_name) + + if self.avg_down_stride: + self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) + + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + + if self.avg_down_stride: + out = self.avd_layer(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class ResNeSt(ResNetV1d): + """ResNeSt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152, 200}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. 
If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)), + 200: (Bottleneck, (3, 24, 36, 3)), + 269: (Bottleneck, (3, 30, 48, 8)) + } + + def __init__(self, + depth, + groups=1, + width_per_group=4, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + self.groups = groups + self.width_per_group = width_per_group + self.radix = radix + self.reduction_factor = reduction_factor + self.avg_down_stride = avg_down_stride + super(ResNeSt, self).__init__(depth=depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + radix=self.radix, + reduction_factor=self.reduction_factor, + avg_down_stride=self.avg_down_stride, + **kwargs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet.py new file mode 100644 index 0000000000..35dbf98a42 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet.py @@ -0,0 +1,651 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer, + constant_init) +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class BasicBlock(nn.Module): + """BasicBlock for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the output channels of conv1. This is a + reserved argument in BasicBlock and should always be 1. Default: 1. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None. + style (str): `pytorch` or `caffe`. It is unused and reserved for + unified API with Bottleneck. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. 
+ Default: dict(type='BN') + """ + + def __init__(self, + in_channels, + out_channels, + expansion=1, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN')): + super(BasicBlock, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert self.expansion == 1 + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, out_channels, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + 3, + padding=1, + bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + """Bottleneck block for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. Default: 4. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None. + style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: "pytorch". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. 
+ Default: dict(type='BN') + """ + + def __init__(self, + in_channels, + out_channels, + expansion=4, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN')): + super(Bottleneck, self).__init__() + assert style in ['pytorch', 'caffe'] + + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + @property + def norm3(self): + return getattr(self, self.norm3_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def get_expansion(block, expansion=None): + """Get the expansion of a residual block. + + The block expansion will be obtained by the following order: + + 1. If ``expansion`` is given, just return it. + 2. If ``block`` has the attribute ``expansion``, then return + ``block.expansion``. + 3. Return the default value according the the block type: + 1 for ``BasicBlock`` and 4 for ``Bottleneck``. + + Args: + block (class): The block class. + expansion (int | None): The given expansion ratio. + + Returns: + int: The expansion of the block. + """ + if isinstance(expansion, int): + assert expansion > 0 + elif expansion is None: + if hasattr(block, 'expansion'): + expansion = block.expansion + elif issubclass(block, BasicBlock): + expansion = 1 + elif issubclass(block, Bottleneck): + expansion = 4 + else: + raise TypeError(f'expansion is not specified for {block.__name__}') + else: + raise TypeError('expansion must be an integer or None') + + return expansion + + +class ResLayer(nn.Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): Residual block used to build ResLayer. 
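# Illustrative sketch (not part of the patched file): the resolution order of
# ``get_expansion`` defined above is explicit argument > ``expansion`` class
# attribute > per-type default (1 for BasicBlock, 4 for Bottleneck).
from mmcls.models.backbones.resnet import (BasicBlock, Bottleneck,
                                           get_expansion)

assert get_expansion(BasicBlock) == 1      # type default for BasicBlock
assert get_expansion(Bottleneck) == 4      # type default for Bottleneck
assert get_expansion(Bottleneck, 2) == 2   # an explicit value always wins


class WideBottleneck(Bottleneck):
    expansion = 8                          # class attribute is checked next


assert get_expansion(WideBottleneck) == 8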
+ num_blocks (int): Number of blocks. + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int, optional): The expansion for BasicBlock/Bottleneck. + If not specified, it will firstly be obtained via + ``block.expansion``. If the block has no attribute "expansion", + the following default values will be used: 1 for BasicBlock and + 4 for Bottleneck. Default: None. + stride (int): stride of the first block. Default: 1. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + """ + + def __init__(self, + block, + num_blocks, + in_channels, + out_channels, + expansion=None, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + **kwargs): + self.block = block + self.expansion = get_expansion(block, expansion) + + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = [] + conv_stride = stride + if avg_down and stride != 1: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + in_channels = out_channels + for i in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + super(ResLayer, self).__init__(*layers) + + +@BACKBONES.register_module() +class ResNet(BaseBackbone): + """ResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + base_channels (int): Middle channels of the first stage. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. + Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). 
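# Illustrative sketch (not part of the patched file): building one ResNet-50
# style stage by hand with the ``ResLayer`` above. The first block receives a
# 1x1 downsample projection because in_channels (64) != out_channels (256).
import torch
from mmcls.models.backbones.resnet import Bottleneck, ResLayer

stage = ResLayer(
    block=Bottleneck,
    num_blocks=3,
    in_channels=64,
    out_channels=256,
    stride=1)
feat = stage(torch.rand(1, 64, 56, 56))
print(tuple(feat.shape))  # (1, 256, 56, 56): channels projected, stride kept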
Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmcls.models import ResNet + >>> import torch + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=64, + base_channels=64, + expansion=None, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(ResNet, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.expansion = get_expansion(self.block, expansion) + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + _in_channels = stem_channels + _out_channels = base_channels * self.expansion + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + res_layer = self.make_res_layer( + block=self.block, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=_out_channels, + expansion=self.expansion, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + _in_channels = _out_channels + _out_channels *= 2 + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = res_layer[-1].out_channels + + def make_res_layer(self, **kwargs): + return ResLayer(**kwargs) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + if self.deep_stem: + self.stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + 
ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ResNet, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. + return + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@BACKBONES.register_module() +class ResNetV1d(ResNet): + """ResNetV1d backbone. + + This variant is described in `Bag of Tricks. + `_. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. + """ + + def __init__(self, **kwargs): + super(ResNetV1d, self).__init__( + deep_stem=True, avg_down=True, **kwargs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet_cifar.py new file mode 100644 index 0000000000..54b8a48bfb --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnet_cifar.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import ResNet + + +@BACKBONES.register_module() +class ResNet_CIFAR(ResNet): + """ResNet backbone for CIFAR. + + Compared to standard ResNet, it uses `kernel_size=3` and `stride=1` in + conv1, and does not apply MaxPoolinng after stem. 
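# Illustrative sketch (not part of the patched file): how ``frozen_stages``
# and ``norm_eval`` of the ResNet above behave. With frozen_stages=1 the stem
# and layer1 stop receiving gradients, and _freeze_stages()/train() keep
# their BN layers in eval mode even when the model is switched to train mode.
import torch
from mmcls.models import ResNet

model = ResNet(depth=18, frozen_stages=1, norm_eval=True)
model.train()

assert not any(p.requires_grad for p in model.layer1.parameters())
assert all(p.requires_grad for p in model.layer2.parameters())
assert not model.norm1.training  # stem BN stays in eval mode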
It has been proven to + be more efficient than standard ResNet in other public codebase, e.g., + `https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py`. + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + base_channels (int): Middle channels of the first stage. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): This network has specific designed stem, thus it is + asserted to be False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + def __init__(self, depth, deep_stem=False, **kwargs): + super(ResNet_CIFAR, self).__init__( + depth, deep_stem=deep_stem, **kwargs) + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnext.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnext.py new file mode 100644 index 0000000000..2370b7114a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/resnext.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNet + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeXt. 
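# Illustrative sketch (not part of the patched file): the CIFAR stem above
# keeps the 32x32 resolution (3x3 conv, stride 1, no max-pooling), so the
# stage outputs are 32/16/8/4 instead of the 56/28/14/7 of the ImageNet stem.
# This assumes ResNet_CIFAR is exported from mmcls.models like the other
# backbones in this patch.
import torch
from mmcls.models import ResNet_CIFAR

model = ResNet_CIFAR(depth=18, out_indices=(0, 1, 2, 3))
model.eval()
for out in model(torch.rand(1, 3, 32, 32)):
    print(tuple(out.shape))
# (1, 64, 32, 32), (1, 128, 16, 16), (1, 256, 8, 8), (1, 512, 4, 4)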
+ + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + **kwargs): + super(Bottleneck, self).__init__(in_channels, out_channels, **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. + if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@BACKBONES.register_module() +class ResNeXt(ResNet): + """ResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. 
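# Illustrative sketch (not part of the patched file): the mid-channel
# arithmetic used above, worked out for the first-stage bottleneck of a
# ResNeXt-50 32x4d (out_channels=256, expansion=4, base_channels=64).
out_channels, expansion = 256, 4
base_channels, groups, width_per_group = 64, 32, 4

mid_channels = out_channels // expansion                  # 64 in plain ResNet
mid_channels = groups * width_per_group * mid_channels // base_channels
assert mid_channels == 128                                # 32 groups x 4 channels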
If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super(ResNeXt, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnet.py new file mode 100644 index 0000000000..0cfc5d1d2e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnet.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.utils.checkpoint as cp + +from ..builder import BACKBONES +from ..utils.se_layer import SELayer +from .resnet import Bottleneck, ResLayer, ResNet + + +class SEBottleneck(Bottleneck): + """SEBottleneck block for SEResNet. + + Args: + in_channels (int): The input channels of the SEBottleneck block. + out_channels (int): The output channel of the SEBottleneck block. + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + """ + + def __init__(self, in_channels, out_channels, se_ratio=16, **kwargs): + super(SEBottleneck, self).__init__(in_channels, out_channels, **kwargs) + self.se_layer = SELayer(out_channels, ratio=se_ratio) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + out = self.se_layer(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class SEResNet(ResNet): + """SEResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. 
+ num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmcls.models import SEResNet + >>> import torch + >>> self = SEResNet(depth=50) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 56, 56) + (1, 128, 28, 28) + (1, 256, 14, 14) + (1, 512, 7, 7) + """ + + arch_settings = { + 50: (SEBottleneck, (3, 4, 6, 3)), + 101: (SEBottleneck, (3, 4, 23, 3)), + 152: (SEBottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, se_ratio=16, **kwargs): + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for SEResNet') + self.se_ratio = se_ratio + super(SEResNet, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer(se_ratio=self.se_ratio, **kwargs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnext.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnext.py new file mode 100644 index 0000000000..aff5cb4934 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/seresnext.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from .resnet import ResLayer +from .seresnet import SEBottleneck as _SEBottleneck +from .seresnet import SEResNet + + +class SEBottleneck(_SEBottleneck): + """SEBottleneck block for SEResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + base_channels (int): Middle channels of the first stage. Default: 64. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. 
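# Illustrative sketch (not part of the patched file): ``SELayer`` is imported
# from ``..utils.se_layer`` and its implementation does not appear in this
# part of the diff; the class below is only a rough sketch of what a
# squeeze-and-excitation gate with ``ratio=16`` typically does, not the
# actual mmcls implementation.
import torch
import torch.nn as nn


class NaiveSELayer(nn.Module):

    def __init__(self, channels, ratio=16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)  # squeeze: global channel context
        self.gate = nn.Sequential(
            nn.Conv2d(channels, channels // ratio, 1), nn.ReLU(inplace=True),
            nn.Conv2d(channels // ratio, channels, 1), nn.Sigmoid())

    def forward(self, x):
        return x * self.gate(self.pool(x))   # excitation: reweight channels


out = NaiveSELayer(256)(torch.rand(1, 256, 14, 14))  # shape is unchanged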
Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + se_ratio=16, + **kwargs): + super(SEBottleneck, self).__init__(in_channels, out_channels, se_ratio, + **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # We follow the same rational of ResNext to compute mid_channels. + # For SEResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for SEResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. + if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@BACKBONES.register_module() +class SEResNeXt(SEResNet): + """SEResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. 
+ avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + arch_settings = { + 50: (SEBottleneck, (3, 4, 6, 3)), + 101: (SEBottleneck, (3, 4, 23, 3)), + 152: (SEBottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super(SEResNeXt, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v1.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v1.py new file mode 100644 index 0000000000..0b6c70f08c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v1.py @@ -0,0 +1,321 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, build_activation_layer, constant_init, + normal_init) +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import channel_shuffle, make_divisible +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class ShuffleUnit(BaseModule): + """ShuffleUnit block. + + ShuffleNet unit with pointwise group convolution (GConv) and channel + shuffle. + + Args: + in_channels (int): The input channels of the ShuffleUnit. + out_channels (int): The output channels of the ShuffleUnit. + groups (int): The number of groups to be used in grouped 1x1 + convolutions in each ShuffleUnit. Default: 3 + first_block (bool): Whether it is the first ShuffleUnit of a + sequential ShuffleUnits. Default: True, which means not using the + grouped 1x1 convolution. + combine (str): The ways to combine the input and output + branches. Default: 'add'. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + + Returns: + Tensor: The output tensor. 
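# Illustrative sketch (not part of the patched file): ``channel_shuffle`` is
# imported from mmcls.models.utils and its implementation is not shown here;
# the helper below is a rough equivalent of the usual reshape-transpose
# shuffle that lets information cross the grouped 1x1 convolutions.
import torch


def naive_channel_shuffle(x, groups):
    n, c, h, w = x.size()
    x = x.view(n, groups, c // groups, h, w)  # split channels into groups
    x = x.transpose(1, 2).contiguous()        # interleave the groups
    return x.view(n, c, h, w)


x = torch.arange(6).view(1, 6, 1, 1)
print(naive_channel_shuffle(x, 3).flatten().tolist())  # [0, 2, 4, 1, 3, 5]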
+ """ + + def __init__(self, + in_channels, + out_channels, + groups=3, + first_block=True, + combine='add', + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False): + super(ShuffleUnit, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.first_block = first_block + self.combine = combine + self.groups = groups + self.bottleneck_channels = self.out_channels // 4 + self.with_cp = with_cp + + if self.combine == 'add': + self.depthwise_stride = 1 + self._combine_func = self._add + assert in_channels == out_channels, ( + 'in_channels must be equal to out_channels when combine ' + 'is add') + elif self.combine == 'concat': + self.depthwise_stride = 2 + self._combine_func = self._concat + self.out_channels -= self.in_channels + self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) + else: + raise ValueError(f'Cannot combine tensors with {self.combine}. ' + 'Only "add" and "concat" are supported') + + self.first_1x1_groups = 1 if first_block else self.groups + self.g_conv_1x1_compress = ConvModule( + in_channels=self.in_channels, + out_channels=self.bottleneck_channels, + kernel_size=1, + groups=self.first_1x1_groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.depthwise_conv3x3_bn = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.bottleneck_channels, + kernel_size=3, + stride=self.depthwise_stride, + padding=1, + groups=self.bottleneck_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.g_conv_1x1_expand = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.out_channels, + kernel_size=1, + groups=self.groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.act = build_activation_layer(act_cfg) + + @staticmethod + def _add(x, out): + # residual connection + return x + out + + @staticmethod + def _concat(x, out): + # concatenate along channel axis + return torch.cat((x, out), 1) + + def forward(self, x): + + def _inner_forward(x): + residual = x + + out = self.g_conv_1x1_compress(x) + out = self.depthwise_conv3x3_bn(out) + + if self.groups > 1: + out = channel_shuffle(out, self.groups) + + out = self.g_conv_1x1_expand(out) + + if self.combine == 'concat': + residual = self.avgpool(residual) + out = self.act(out) + out = self._combine_func(residual, out) + else: + out = self._combine_func(residual, out) + out = self.act(out) + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@BACKBONES.register_module() +class ShuffleNetV1(BaseBackbone): + """ShuffleNetV1 backbone. + + Args: + groups (int): The number of groups to be used in grouped 1x1 + convolutions in each ShuffleUnit. Default: 3. + widen_factor (float): Width multiplier - adjusts the number + of channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (2, ) + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). 
Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + groups=3, + widen_factor=1.0, + out_indices=(2, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + norm_eval=False, + with_cp=False, + init_cfg=None): + super(ShuffleNetV1, self).__init__(init_cfg) + self.init_cfg = init_cfg + self.stage_blocks = [4, 8, 4] + self.groups = groups + + for index in out_indices: + if index not in range(0, 3): + raise ValueError('the item in out_indices must in ' + f'range(0, 3). But received {index}') + + if frozen_stages not in range(-1, 3): + raise ValueError('frozen_stages must be in range(-1, 3). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + if groups == 1: + channels = (144, 288, 576) + elif groups == 2: + channels = (200, 400, 800) + elif groups == 3: + channels = (240, 480, 960) + elif groups == 4: + channels = (272, 544, 1088) + elif groups == 8: + channels = (384, 768, 1536) + else: + raise ValueError(f'{groups} groups is not supported for 1x1 ' + 'Grouped Convolutions') + + channels = [make_divisible(ch * widen_factor, 8) for ch in channels] + + self.in_channels = int(24 * widen_factor) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layers = nn.ModuleList() + for i, num_blocks in enumerate(self.stage_blocks): + first_block = True if i == 0 else False + layer = self.make_layer(channels[i], num_blocks, first_block) + self.layers.append(layer) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages): + layer = self.layers[i] + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ShuffleNetV1, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + if 'conv1' in name: + normal_init(m, mean=0, std=0.01) + else: + normal_init(m, mean=0, std=1.0 / m.weight.shape[1]) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, val=1, bias=0.0001) + if isinstance(m, _BatchNorm): + if m.running_mean is not None: + nn.init.constant_(m.running_mean, 0) + + def make_layer(self, out_channels, num_blocks, first_block=False): + """Stack ShuffleUnit blocks to make a layer. + + Args: + out_channels (int): out_channels of the block. + num_blocks (int): Number of blocks. + first_block (bool): Whether is the first ShuffleUnit of a + sequential ShuffleUnits. Default: False, which means using + the grouped 1x1 convolution. 
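# Illustrative sketch (not part of the patched file): with groups=3 the stage
# widths chosen above are (240, 480, 960); requesting all three out_indices
# returns one feature map per stage. This assumes ShuffleNetV1 is exported
# from mmcls.models like the other backbones in this patch.
import torch
from mmcls.models import ShuffleNetV1

model = ShuffleNetV1(groups=3, out_indices=(0, 1, 2))
model.eval()
for out in model(torch.rand(1, 3, 224, 224)):
    print(tuple(out.shape))
# (1, 240, 28, 28), (1, 480, 14, 14), (1, 960, 7, 7)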
+ """ + layers = [] + for i in range(num_blocks): + first_block = first_block if i == 0 else False + combine_mode = 'concat' if i == 0 else 'add' + layers.append( + ShuffleUnit( + self.in_channels, + out_channels, + groups=self.groups, + first_block=first_block, + combine=combine_mode, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super(ShuffleNetV1, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v2.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v2.py new file mode 100644 index 0000000000..77a16e0034 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/shufflenet_v2.py @@ -0,0 +1,297 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, constant_init, normal_init +from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import channel_shuffle +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +class InvertedResidual(BaseModule): + """InvertedResidual block for ShuffleNetV2 backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 convolution layer. Default: 1 + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. 
+ """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.stride = stride + self.with_cp = with_cp + + branch_features = out_channels // 2 + if self.stride == 1: + assert in_channels == branch_features * 2, ( + f'in_channels ({in_channels}) should equal to ' + f'branch_features * 2 ({branch_features * 2}) ' + 'when stride is 1') + + if in_channels != branch_features * 2: + assert self.stride != 1, ( + f'stride ({self.stride}) should not equal 1 when ' + f'in_channels != branch_features * 2') + + if self.stride > 1: + self.branch1 = nn.Sequential( + ConvModule( + in_channels, + in_channels, + kernel_size=3, + stride=self.stride, + padding=1, + groups=in_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + in_channels, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + + self.branch2 = nn.Sequential( + ConvModule( + in_channels if (self.stride > 1) else branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + branch_features, + branch_features, + kernel_size=3, + stride=self.stride, + padding=1, + groups=branch_features, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + + def _inner_forward(x): + if self.stride > 1: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + else: + x1, x2 = x.chunk(2, dim=1) + out = torch.cat((x1, self.branch2(x2)), dim=1) + + out = channel_shuffle(out, 2) + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@BACKBONES.register_module() +class ShuffleNetV2(BaseBackbone): + """ShuffleNetV2 backbone. + + Args: + widen_factor (float): Width multiplier - adjusts the number of + channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (0, 1, 2, 3). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + widen_factor=1.0, + out_indices=(3, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + norm_eval=False, + with_cp=False, + init_cfg=None): + super(ShuffleNetV2, self).__init__(init_cfg) + self.stage_blocks = [4, 8, 4] + for index in out_indices: + if index not in range(0, 4): + raise ValueError('the item in out_indices must in ' + f'range(0, 4). 
But received {index}') + + if frozen_stages not in range(-1, 4): + raise ValueError('frozen_stages must be in range(-1, 4). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + if widen_factor == 0.5: + channels = [48, 96, 192, 1024] + elif widen_factor == 1.0: + channels = [116, 232, 464, 1024] + elif widen_factor == 1.5: + channels = [176, 352, 704, 1024] + elif widen_factor == 2.0: + channels = [244, 488, 976, 2048] + else: + raise ValueError('widen_factor must be in [0.5, 1.0, 1.5, 2.0]. ' + f'But received {widen_factor}') + + self.in_channels = 24 + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layers = nn.ModuleList() + for i, num_blocks in enumerate(self.stage_blocks): + layer = self._make_layer(channels[i], num_blocks) + self.layers.append(layer) + + output_channels = channels[-1] + self.layers.append( + ConvModule( + in_channels=self.in_channels, + out_channels=output_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def _make_layer(self, out_channels, num_blocks): + """Stack blocks to make a layer. + + Args: + out_channels (int): out_channels of the block. + num_blocks (int): number of blocks. + """ + layers = [] + for i in range(num_blocks): + stride = 2 if i == 0 else 1 + layers.append( + InvertedResidual( + in_channels=self.in_channels, + out_channels=out_channels, + stride=stride, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ShuffleNetV2, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. 
+ return + + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + if 'conv1' in name: + normal_init(m, mean=0, std=0.01) + else: + normal_init(m, mean=0, std=1.0 / m.weight.shape[1]) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m.weight, val=1, bias=0.0001) + if isinstance(m, _BatchNorm): + if m.running_mean is not None: + nn.init.constant_(m.running_mean, 0) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super(ShuffleNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/swin_transformer.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/swin_transformer.py new file mode 100644 index 0000000000..f94bded430 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/swin_transformer.py @@ -0,0 +1,401 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import Sequence + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList + +from ..builder import BACKBONES +from ..utils import PatchEmbed, PatchMerging, ShiftWindowMSA +from .base_backbone import BaseBackbone + + +class SwinBlock(BaseModule): + """Swin Transformer block. + + Args: + embed_dims (int): Number of input channels. + input_resolution (Tuple[int, int]): The resolution of the input feature + map. + num_heads (int): Number of attention heads. + window_size (int, optional): The height and width of the window. + Defaults to 7. + shift (bool, optional): Shift the attention window or not. + Defaults to False. + ffn_ratio (float, optional): The expansion ratio of feedforward network + hidden layer channels. Defaults to 4. + drop_path (float, optional): The drop path rate after attention and + ffn. Defaults to 0. + attn_cfgs (dict, optional): The extra config of Shift Window-MSA. + Defaults to empty dict. + ffn_cfgs (dict, optional): The extra config of FFN. + Defaults to empty dict. + norm_cfg (dict, optional): The config of norm layers. + Defaults to dict(type='LN'). + auto_pad (bool, optional): Auto pad the feature map to be divisible by + window_size, Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Default: None. 
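# Illustrative sketch (not part of the patched file): ShuffleNetV2 above
# appends a final 1x1 conv to self.layers, so the default out_indices=(3, )
# yields the 1024-channel feature map for widen_factor=1.0. This assumes
# ShuffleNetV2 is exported from mmcls.models like the other backbones.
import torch
from mmcls.models import ShuffleNetV2

model = ShuffleNetV2(widen_factor=1.0)
model.eval()
feat = model(torch.rand(1, 3, 224, 224))[0]
print(tuple(feat.shape))  # (1, 1024, 7, 7)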
+ """ + + def __init__(self, + embed_dims, + input_resolution, + num_heads, + window_size=7, + shift=False, + ffn_ratio=4., + drop_path=0., + attn_cfgs=dict(), + ffn_cfgs=dict(), + norm_cfg=dict(type='LN'), + auto_pad=False, + init_cfg=None): + + super(SwinBlock, self).__init__(init_cfg) + + _attn_cfgs = { + 'embed_dims': embed_dims, + 'input_resolution': input_resolution, + 'num_heads': num_heads, + 'shift_size': window_size // 2 if shift else 0, + 'window_size': window_size, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'auto_pad': auto_pad, + **attn_cfgs + } + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = ShiftWindowMSA(**_attn_cfgs) + + _ffn_cfgs = { + 'embed_dims': embed_dims, + 'feedforward_channels': int(embed_dims * ffn_ratio), + 'num_fcs': 2, + 'ffn_drop': 0, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'act_cfg': dict(type='GELU'), + **ffn_cfgs + } + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN(**_ffn_cfgs) + + def forward(self, x): + identity = x + x = self.norm1(x) + x = self.attn(x) + x = x + identity + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + return x + + +class SwinBlockSequence(BaseModule): + """Module with successive Swin Transformer blocks and downsample layer. + + Args: + embed_dims (int): Number of input channels. + input_resolution (Tuple[int, int]): The resolution of the input feature + map. + depth (int): Number of successive swin transformer blocks. + num_heads (int): Number of attention heads. + downsample (bool, optional): Downsample the output of blocks by patch + merging. Defaults to False. + downsample_cfg (dict, optional): The extra config of the patch merging + layer. Defaults to empty dict. + drop_paths (Sequence[float] | float, optional): The drop path rate in + each block. Defaults to 0. + block_cfgs (Sequence[dict] | dict, optional): The extra config of each + block. Defaults to empty dicts. + auto_pad (bool, optional): Auto pad the feature map to be divisible by + window_size, Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Default: None. 
+ """ + + def __init__(self, + embed_dims, + input_resolution, + depth, + num_heads, + downsample=False, + downsample_cfg=dict(), + drop_paths=0., + block_cfgs=dict(), + auto_pad=False, + init_cfg=None): + super().__init__(init_cfg) + + if not isinstance(drop_paths, Sequence): + drop_paths = [drop_paths] * depth + + if not isinstance(block_cfgs, Sequence): + block_cfgs = [deepcopy(block_cfgs) for _ in range(depth)] + + self.embed_dims = embed_dims + self.input_resolution = input_resolution + self.blocks = ModuleList() + for i in range(depth): + _block_cfg = { + 'embed_dims': embed_dims, + 'input_resolution': input_resolution, + 'num_heads': num_heads, + 'shift': False if i % 2 == 0 else True, + 'drop_path': drop_paths[i], + 'auto_pad': auto_pad, + **block_cfgs[i] + } + block = SwinBlock(**_block_cfg) + self.blocks.append(block) + + if downsample: + _downsample_cfg = { + 'input_resolution': input_resolution, + 'in_channels': embed_dims, + 'expansion_ratio': 2, + 'norm_cfg': dict(type='LN'), + **downsample_cfg + } + self.downsample = PatchMerging(**_downsample_cfg) + else: + self.downsample = None + + def forward(self, x): + for block in self.blocks: + x = block(x) + + if self.downsample: + x = self.downsample(x) + return x + + @property + def out_resolution(self): + if self.downsample: + return self.downsample.output_resolution + else: + return self.input_resolution + + @property + def out_channels(self): + if self.downsample: + return self.downsample.out_channels + else: + return self.embed_dims + + +@BACKBONES.register_module() +class SwinTransformer(BaseBackbone): + """ Swin Transformer + A PyTorch implement of : `Swin Transformer: + Hierarchical Vision Transformer using Shifted Windows + `_ + + Inspiration from + https://github.com/microsoft/Swin-Transformer + + Args: + arch (str | dict): Swin Transformer architecture + Defaults to 'T'. + img_size (int | tuple): The size of input image. + Defaults to 224. + in_channels (int): The num of input channels. + Defaults to 3. + drop_rate (float): Dropout rate after embedding. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. + Defaults to 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults to False. + auto_pad (bool): If True, auto pad feature map to fit window_size. + Defaults to False. + norm_cfg (dict, optional): Config dict for normalization layer at end + of backone. Defaults to dict(type='LN') + stage_cfgs (Sequence | dict, optional): Extra config dict for each + stage. Defaults to empty dict. + patch_cfg (dict, optional): Extra config dict for patch embedding. + Defaults to empty dict. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
+ + Examples: + >>> from mmcls.models import SwinTransformer + >>> import torch + >>> extra_config = dict( + >>> arch='tiny', + >>> stage_cfgs=dict(downsample_cfg={'kernel_size': 3, + >>> 'expansion_ratio': 3}), + >>> auto_pad=True) + >>> self = SwinTransformer(**extra_config) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> output = self.forward(inputs) + >>> print(output.shape) + (1, 2592, 4) + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'embed_dims': 96, + 'depths': [2, 2, 6, 2], + 'num_heads': [3, 6, 12, 24]}), + **dict.fromkeys(['s', 'small'], + {'embed_dims': 96, + 'depths': [2, 2, 18, 2], + 'num_heads': [3, 6, 12, 24]}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': 128, + 'depths': [2, 2, 18, 2], + 'num_heads': [4, 8, 16, 32]}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': 192, + 'depths': [2, 2, 18, 2], + 'num_heads': [6, 12, 24, 48]}), + } # yapf: disable + + _version = 2 + + def __init__(self, + arch='T', + img_size=224, + in_channels=3, + drop_rate=0., + drop_path_rate=0.1, + out_indices=(3, ), + use_abs_pos_embed=False, + auto_pad=False, + norm_cfg=dict(type='LN'), + stage_cfgs=dict(), + patch_cfg=dict(), + init_cfg=None): + super(SwinTransformer, self).__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'embed_dims', 'depths', 'num_head'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + self.num_layers = len(self.depths) + self.out_indices = out_indices + self.use_abs_pos_embed = use_abs_pos_embed + self.auto_pad = auto_pad + + _patch_cfg = { + 'img_size': img_size, + 'in_channels': in_channels, + 'embed_dims': self.embed_dims, + 'conv_cfg': dict(type='Conv2d', kernel_size=4, stride=4), + 'norm_cfg': dict(type='LN'), + **patch_cfg + } + self.patch_embed = PatchEmbed(**_patch_cfg) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + if self.use_abs_pos_embed: + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, num_patches, self.embed_dims)) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + # stochastic depth + total_depth = sum(self.depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + self.stages = ModuleList() + embed_dims = self.embed_dims + input_resolution = patches_resolution + for i, (depth, + num_heads) in enumerate(zip(self.depths, self.num_heads)): + if isinstance(stage_cfgs, Sequence): + stage_cfg = stage_cfgs[i] + else: + stage_cfg = deepcopy(stage_cfgs) + downsample = True if i < self.num_layers - 1 else False + _stage_cfg = { + 'embed_dims': embed_dims, + 'depth': depth, + 'num_heads': num_heads, + 'downsample': downsample, + 'input_resolution': input_resolution, + 'drop_paths': dpr[:depth], + 'auto_pad': auto_pad, + **stage_cfg + } + + stage = SwinBlockSequence(**_stage_cfg) + self.stages.append(stage) + + dpr = dpr[depth:] + embed_dims = stage.out_channels + input_resolution = stage.out_resolution + + for i in out_indices: + if norm_cfg is not None: + norm_layer = build_norm_layer(norm_cfg, embed_dims)[1] + else: + 
norm_layer = nn.Identity() + + self.add_module(f'norm{i}', norm_layer) + + def init_weights(self): + super(SwinTransformer, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + + def forward(self, x): + x = self.patch_embed(x) + if self.use_abs_pos_embed: + x = x + self.absolute_pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x = stage(x) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(x) + out = out.view(-1, *stage.out_resolution, + stage.out_channels).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + + return tuple(outs) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, *args, + **kwargs): + """load checkpoints.""" + # Names of some parameters in has been changed. + version = local_metadata.get('version', None) + if (version is None + or version < 2) and self.__class__ is SwinTransformer: + final_stage_num = len(self.stages) - 1 + state_dict_keys = list(state_dict.keys()) + for k in state_dict_keys: + if k.startswith('norm.') or k.startswith('backbone.norm.'): + convert_key = k.replace('norm.', f'norm{final_stage_num}.') + state_dict[convert_key] = state_dict[k] + del state_dict[k] + + super()._load_from_state_dict(state_dict, prefix, local_metadata, + *args, **kwargs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/t2t_vit.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/t2t_vit.py new file mode 100644 index 0000000000..2e9cb527ff --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/t2t_vit.py @@ -0,0 +1,367 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList + +from ..builder import BACKBONES +from ..utils import MultiheadAttention +from .base_backbone import BaseBackbone + + +class T2TTransformerLayer(BaseModule): + """Transformer Layer for T2T_ViT. + + Comparing with :obj:`TransformerEncoderLayer` in ViT, it supports + different ``input_dims`` and ``embed_dims``. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs + input_dims (int, optional): The input token dimension. + Defaults to None. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``(input_dims // num_heads) ** -0.5`` if set. Defaults to None. + act_cfg (dict): The activation config for FFNs. + Defaluts to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. 
+ init_cfg (dict, optional): Initialization config dict. + Defaults to None. + + Notes: + In general, ``qk_scale`` should be ``head_dims ** -0.5``, i.e. + ``(embed_dims // num_heads) ** -0.5``. However, in the official + code, it uses ``(input_dims // num_heads) ** -0.5``, so here we + keep the same with the official implementation. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + input_dims=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=False, + qk_scale=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(T2TTransformerLayer, self).__init__(init_cfg=init_cfg) + + self.v_shortcut = True if input_dims is not None else False + input_dims = input_dims or embed_dims + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, input_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + self.attn = MultiheadAttention( + input_dims=input_dims, + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + qk_scale=qk_scale or (input_dims // num_heads)**-0.5, + v_shortcut=self.v_shortcut) + + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + self.add_module(self.norm2_name, norm2) + + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + if self.v_shortcut: + x = self.attn(self.norm1(x)) + else: + x = x + self.attn(self.norm1(x)) + x = self.ffn(self.norm2(x), identity=x) + return x + + +class T2TModule(BaseModule): + """Tokens-to-Token module. + + "Tokens-to-Token module" (T2T Module) can model the local structure + information of images and reduce the length of tokens progressively. + + Args: + img_size (int): Input image size + in_channels (int): Number of input channels + embed_dims (int): Embedding dimension + token_dims (int): Tokens dimension in T2TModuleAttention. + use_performer (bool): If True, use Performer version self-attention to + adopt regular self-attention. Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Default: None. 
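+
+    Example (an illustrative usage sketch; the 196 output tokens follow from
+    the three soft splits with strides 4, 2 and 2, i.e. (224 / 16)^2):
+
+        >>> import torch
+        >>> from mmcls.models.backbones.t2t_vit import T2TModule
+        >>> t2t = T2TModule(img_size=224, in_channels=3, embed_dims=384)
+        >>> t2t(torch.rand(1, 3, 224, 224)).shape
+        torch.Size([1, 196, 384])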
+ + Notes: + Usually, ``token_dim`` is set as a small value (32 or 64) to reduce + MACs + """ + + def __init__( + self, + img_size=224, + in_channels=3, + embed_dims=384, + token_dims=64, + use_performer=False, + init_cfg=None, + ): + super(T2TModule, self).__init__(init_cfg) + + self.embed_dims = embed_dims + + self.soft_split0 = nn.Unfold( + kernel_size=(7, 7), stride=(4, 4), padding=(2, 2)) + self.soft_split1 = nn.Unfold( + kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + self.soft_split2 = nn.Unfold( + kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + + if not use_performer: + self.attention1 = T2TTransformerLayer( + input_dims=in_channels * 7 * 7, + embed_dims=token_dims, + num_heads=1, + feedforward_channels=token_dims) + + self.attention2 = T2TTransformerLayer( + input_dims=token_dims * 3 * 3, + embed_dims=token_dims, + num_heads=1, + feedforward_channels=token_dims) + + self.project = nn.Linear(token_dims * 3 * 3, embed_dims) + else: + raise NotImplementedError("Performer hasn't been implemented.") + + # there are 3 soft split, stride are 4,2,2 separately + self.num_patches = (img_size // (4 * 2 * 2))**2 + + def forward(self, x): + # step0: soft split + x = self.soft_split0(x).transpose(1, 2) + + for step in [1, 2]: + # re-structurization/reconstruction + attn = getattr(self, f'attention{step}') + x = attn(x).transpose(1, 2) + B, C, new_HW = x.shape + x = x.reshape(B, C, int(np.sqrt(new_HW)), int(np.sqrt(new_HW))) + + # soft split + soft_split = getattr(self, f'soft_split{step}') + x = soft_split(x).transpose(1, 2) + + # final tokens + x = self.project(x) + return x + + +def get_sinusoid_encoding(n_position, embed_dims): + """Generate sinusoid encoding table. + + Sinusoid encoding is a kind of relative position encoding method came from + `Attention Is All You Need`_. + + Args: + n_position (int): The length of the input token. + embed_dims (int): The position embedding dimension. + + Returns: + :obj:`torch.FloatTensor`: The sinusoid encoding table. + """ + + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (i // 2) / embed_dims) + for i in range(embed_dims) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos) for pos in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +@BACKBONES.register_module() +class T2T_ViT(BaseBackbone): + """Tokens-to-Token Vision Transformer (T2T-ViT) + + A PyTorch implementation of `Tokens-to-Token ViT: Training Vision + Transformers from Scratch on ImageNet`_ + + Args: + img_size (int): Input image size. + in_channels (int): Number of input channels. + embed_dims (int): Embedding dimension. + t2t_cfg (dict): Extra config of Tokens-to-Token module. + Defaults to an empty dict. + drop_rate (float): Dropout rate after position embedding. + Defaults to 0. + num_layers (int): Num of transformer layers in encoder. + Defaults to 14. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. Defaults to + ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. 
+ output_cls_token (bool): Whether output the cls_token. + Defaults to True. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + img_size=224, + in_channels=3, + embed_dims=384, + t2t_cfg=dict(), + drop_rate=0., + num_layers=14, + out_indices=-1, + layer_cfgs=dict(), + drop_path_rate=0., + norm_cfg=dict(type='LN'), + final_norm=True, + output_cls_token=True, + init_cfg=None): + super(T2T_ViT, self).__init__(init_cfg) + + # Token-to-Token Module + self.tokens_to_token = T2TModule( + img_size=img_size, + in_channels=in_channels, + embed_dims=embed_dims, + **t2t_cfg) + num_patches = self.tokens_to_token.num_patches + + # Class token + self.output_cls_token = output_cls_token + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + + # Position Embedding + sinusoid_table = get_sinusoid_encoding(num_patches + 1, embed_dims) + self.register_buffer('pos_embed', sinusoid_table) + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = num_layers + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + dpr = [x for x in np.linspace(0, drop_path_rate, num_layers)] + self.encoder = ModuleList() + for i in range(num_layers): + if isinstance(layer_cfgs, Sequence): + layer_cfg = layer_cfgs[i] + else: + layer_cfg = deepcopy(layer_cfgs) + layer_cfg = { + 'embed_dims': embed_dims, + 'num_heads': 6, + 'feedforward_channels': 3 * embed_dims, + 'drop_path_rate': dpr[i], + 'qkv_bias': False, + 'norm_cfg': norm_cfg, + **layer_cfg + } + + layer = T2TTransformerLayer(**layer_cfg) + self.encoder.append(layer) + + self.final_norm = final_norm + if final_norm: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = nn.Identity() + + def init_weights(self): + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress custom init if use pretrained model. + return + + trunc_normal_(self.cls_token, std=.02) + + def forward(self, x): + B = x.shape[0] + x = self.tokens_to_token(x) + num_patches = self.tokens_to_token.num_patches + patch_resolution = [int(np.sqrt(num_patches))] * 2 + + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, layer in enumerate(self.encoder): + x = layer(x) + + if i == len(self.encoder) - 1 and self.final_norm: + x = self.norm(x) + + if i in self.out_indices: + B, _, C = x.shape + patch_token = x[:, 1:].reshape(B, *patch_resolution, C) + patch_token = patch_token.permute(0, 3, 1, 2) + cls_token = x[:, 0] + if self.output_cls_token: + out = [patch_token, cls_token] + else: + out = patch_token + outs.append(out) + + return tuple(outs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/timm_backbone.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/timm_backbone.py new file mode 100644 index 0000000000..3bc7f226b1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/timm_backbone.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
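+# Example usage (an illustrative sketch; it assumes the optional `timm`
+# package is installed and provides the requested model name):
+#
+#     import torch
+#     from mmcls.models import build_backbone
+#     cfg = dict(type='TIMMBackbone', model_name='resnet50', pretrained=False)
+#     backbone = build_backbone(cfg)
+#     feats = backbone(torch.rand(1, 3, 224, 224))  # a one-element tuple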
+try: + import timm +except ImportError: + timm = None + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class TIMMBackbone(BaseBackbone): + """Wrapper to use backbones from timm library. More details can be found in + `timm `_ . + + Args: + model_name (str): Name of timm model to instantiate. + pretrained (bool): Load pretrained weights if True. + checkpoint_path (str): Path of checkpoint to load after + model is initialized. + in_channels (int): Number of input image channels. Default: 3. + init_cfg (dict, optional): Initialization config dict + **kwargs: Other timm & model specific arguments. + """ + + def __init__( + self, + model_name, + pretrained=False, + checkpoint_path='', + in_channels=3, + init_cfg=None, + **kwargs, + ): + if timm is None: + raise RuntimeError('timm is not installed') + super(TIMMBackbone, self).__init__(init_cfg) + self.timm_model = timm.create_model( + model_name=model_name, + pretrained=pretrained, + in_chans=in_channels, + checkpoint_path=checkpoint_path, + **kwargs, + ) + + # Make unused parameters None + self.timm_model.global_pool = None + self.timm_model.fc = None + self.timm_model.classifier = None + + # Hack to use pretrained weights from timm + if pretrained or checkpoint_path: + self._is_init = True + + def forward(self, x): + features = self.timm_model.forward_features(x) + return (features, ) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/tnt.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/tnt.py new file mode 100644 index 0000000000..e0be2f2d85 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/tnt.py @@ -0,0 +1,367 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule, ModuleList + +from ..builder import BACKBONES +from ..utils import to_2tuple +from .base_backbone import BaseBackbone + + +class TransformerBlock(BaseModule): + """Implement a transformer block in TnTLayer. + + Args: + embed_dims (int): The feature dimension + num_heads (int): Parallel attention heads + ffn_ratio (int): A ratio to calculate the hidden_dims in ffn layer. + Default: 4 + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default 0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0. + drop_path_rate (float): stochastic depth rate. Default 0. + num_fcs (int): The number of fully-connected layers for FFNs. Default 2 + qkv_bias (bool): Enable bias for qkv if True. Default False + act_cfg (dict): The activation config for FFNs. Defaults to GELU. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) or (n, batch, embed_dim). + (batch, n, embed_dim) is common case in CV. Default to False + init_cfg (dict, optional): Initialization config dict. 
Default to None + """ + + def __init__(self, + embed_dims, + num_heads, + ffn_ratio=4, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=False, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + batch_first=True, + init_cfg=None): + super(TransformerBlock, self).__init__(init_cfg=init_cfg) + + self.norm_attn = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = MultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + batch_first=batch_first) + + self.norm_ffn = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=embed_dims * ffn_ratio, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + if not qkv_bias: + self.attn.attn.in_proj_bias = None + + def forward(self, x): + x = self.attn(self.norm_attn(x), identity=x) + x = self.ffn(self.norm_ffn(x), identity=x) + return x + + +class TnTLayer(BaseModule): + """Implement one encoder layer in Transformer in Transformer. + + Args: + num_pixel (int): The pixel number in target patch transformed with + a linear projection in inner transformer + embed_dims_inner (int): Feature dimension in inner transformer block + embed_dims_outer (int): Feature dimension in outer transformer block + num_heads_inner (int): Parallel attention heads in inner transformer. + num_heads_outer (int): Parallel attention heads in outer transformer. + inner_block_cfg (dict): Extra config of inner transformer block. + Defaults to empty dict. + outer_block_cfg (dict): Extra config of outer transformer block. + Defaults to empty dict. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization + init_cfg (dict, optional): Initialization config dict. Default to None + """ + + def __init__(self, + num_pixel, + embed_dims_inner, + embed_dims_outer, + num_heads_inner, + num_heads_outer, + inner_block_cfg=dict(), + outer_block_cfg=dict(), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(TnTLayer, self).__init__(init_cfg=init_cfg) + + self.inner_block = TransformerBlock( + embed_dims=embed_dims_inner, + num_heads=num_heads_inner, + **inner_block_cfg) + + self.norm_proj = build_norm_layer(norm_cfg, embed_dims_inner)[1] + self.projection = nn.Linear( + embed_dims_inner * num_pixel, embed_dims_outer, bias=True) + + self.outer_block = TransformerBlock( + embed_dims=embed_dims_outer, + num_heads=num_heads_outer, + **outer_block_cfg) + + def forward(self, pixel_embed, patch_embed): + pixel_embed = self.inner_block(pixel_embed) + + B, N, C = patch_embed.size() + patch_embed[:, 1:] = patch_embed[:, 1:] + self.projection( + self.norm_proj(pixel_embed).reshape(B, N - 1, -1)) + patch_embed = self.outer_block(patch_embed) + + return pixel_embed, patch_embed + + +class PixelEmbed(BaseModule): + """Image to Pixel Embedding. + + Args: + img_size (int | tuple): The size of input image + patch_size (int): The size of one patch + in_channels (int): The num of input channels + embed_dims_inner (int): The num of channels of the target patch + transformed with a linear projection in inner transformer + stride (int): The stride of the conv2d layer. We use a conv2d layer + and a unfold layer to implement image to pixel embedding. 
+ init_cfg (dict, optional): Initialization config dict + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_channels=3, + embed_dims_inner=48, + stride=4, + init_cfg=None): + super(PixelEmbed, self).__init__(init_cfg=init_cfg) + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + # patches_resolution property necessary for resizing + # positional embedding + patches_resolution = [ + img_size[0] // patch_size[0], img_size[1] // patch_size[1] + ] + num_patches = patches_resolution[0] * patches_resolution[1] + + self.img_size = img_size + self.num_patches = num_patches + self.embed_dims_inner = embed_dims_inner + + new_patch_size = [math.ceil(ps / stride) for ps in patch_size] + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2d( + in_channels, + self.embed_dims_inner, + kernel_size=7, + padding=3, + stride=stride) + self.unfold = nn.Unfold( + kernel_size=new_patch_size, stride=new_patch_size) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model " \ + f'({self.img_size[0]}*{self.img_size[1]}).' + x = self.proj(x) + x = self.unfold(x) + x = x.transpose(1, + 2).reshape(B * self.num_patches, self.embed_dims_inner, + self.new_patch_size[0], + self.new_patch_size[1]) + x = x + pixel_pos + x = x.reshape(B * self.num_patches, self.embed_dims_inner, + -1).transpose(1, 2) + return x + + +@BACKBONES.register_module() +class TNT(BaseBackbone): + """ Transformer in Transformer + A PyTorch implement of : `Transformer in Transformer + `_ + + Inspiration from + https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tnt.py + + Args: + arch (str | dict): Vision Transformer architecture + Default: 'b' + img_size (int | tuple): Input image size. Default to 224 + patch_size (int | tuple): The patch size. Deault to 16 + in_channels (int): Number of input channels. Default to 3 + ffn_ratio (int): A ratio to calculate the hidden_dims in ffn layer. + Default: 4 + qkv_bias (bool): Enable bias for qkv if True. Default False + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default 0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0. + drop_path_rate (float): stochastic depth rate. Default 0. + act_cfg (dict): The activation config for FFNs. Defaults to GELU. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization + first_stride (int): The stride of the conv2d layer. We use a conv2d + layer and a unfold layer to implement image to pixel embedding. + num_fcs (int): The number of fully-connected layers for FFNs. Default 2 + init_cfg (dict, optional): Initialization config dict + """ + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims_outer': 384, + 'embed_dims_inner': 24, + 'num_layers': 12, + 'num_heads_outer': 6, + 'num_heads_inner': 4 + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims_outer': 640, + 'embed_dims_inner': 40, + 'num_layers': 12, + 'num_heads_outer': 10, + 'num_heads_inner': 4 + }) + } + + def __init__(self, + arch='b', + img_size=224, + patch_size=16, + in_channels=3, + ffn_ratio=4, + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + first_stride=4, + num_fcs=2, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ]): + super(TNT, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims_outer', 'embed_dims_inner', 'num_layers', + 'num_heads_inner', 'num_heads_outer' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims_inner = self.arch_settings['embed_dims_inner'] + self.embed_dims_outer = self.arch_settings['embed_dims_outer'] + # embed_dims for consistency with other models + self.embed_dims = self.embed_dims_outer + self.num_layers = self.arch_settings['num_layers'] + self.num_heads_inner = self.arch_settings['num_heads_inner'] + self.num_heads_outer = self.arch_settings['num_heads_outer'] + + self.pixel_embed = PixelEmbed( + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dims_inner=self.embed_dims_inner, + stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size[0] * new_patch_size[1] + + self.norm1_proj = build_norm_layer(norm_cfg, num_pixel * + self.embed_dims_inner)[1] + self.projection = nn.Linear(num_pixel * self.embed_dims_inner, + self.embed_dims_outer) + self.norm2_proj = build_norm_layer(norm_cfg, self.embed_dims_outer)[1] + + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims_outer)) + self.patch_pos = nn.Parameter( + torch.zeros(1, num_patches + 1, self.embed_dims_outer)) + self.pixel_pos = nn.Parameter( + torch.zeros(1, self.embed_dims_inner, new_patch_size[0], + new_patch_size[1])) + self.drop_after_pos = nn.Dropout(p=drop_rate) + + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, self.num_layers) + ] # stochastic depth decay rule + self.layers = ModuleList() + for i in range(self.num_layers): + block_cfg = dict( + ffn_ratio=ffn_ratio, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[i], + num_fcs=num_fcs, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + batch_first=True) + self.layers.append( + TnTLayer( + num_pixel=num_pixel, + embed_dims_inner=self.embed_dims_inner, + embed_dims_outer=self.embed_dims_outer, + num_heads_inner=self.num_heads_inner, + num_heads_outer=self.num_heads_outer, + inner_block_cfg=block_cfg, + outer_block_cfg=block_cfg, + norm_cfg=norm_cfg)) + + self.norm = build_norm_layer(norm_cfg, self.embed_dims_outer)[1] + + trunc_normal_(self.cls_token, std=.02) + trunc_normal_(self.patch_pos, std=.02) + trunc_normal_(self.pixel_pos, std=.02) + + def forward(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj( + self.projection( + self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) + patch_embed = torch.cat( + (self.cls_token.expand(B, -1, -1), patch_embed), dim=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.drop_after_pos(patch_embed) + + for layer in self.layers: + pixel_embed, patch_embed = layer(pixel_embed, patch_embed) + + patch_embed = self.norm(patch_embed) + return (patch_embed[:, 0], ) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vgg.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vgg.py new file mode 100644 index 0000000000..b21151c880 --- 
/dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vgg.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from ..builder import BACKBONES +from .base_backbone import BaseBackbone + + +def make_vgg_layer(in_channels, + out_channels, + num_blocks, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + dilation=1, + with_norm=False, + ceil_mode=False): + layers = [] + for _ in range(num_blocks): + layer = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + dilation=dilation, + padding=dilation, + bias=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + layers.append(layer) + in_channels = out_channels + layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) + + return layers + + +@BACKBONES.register_module() +class VGG(BaseBackbone): + """VGG backbone. + + Args: + depth (int): Depth of vgg, from {11, 13, 16, 19}. + with_norm (bool): Use BatchNorm or not. + num_classes (int): number of classes for classification. + num_stages (int): VGG stages, normally 5. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int], optional): Output from which stages. + When it is None, the default behavior depends on whether + num_classes is specified. If num_classes <= 0, the default value is + (4, ), output the last feature map before classifier. If + num_classes > 0, the default value is (5, ), output the + classification score. Default: None. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + ceil_mode (bool): Whether to use ceil_mode of MaxPool. Default: False. + with_last_pool (bool): Whether to keep the last pooling before + classifier. Default: True. + """ + + # Parameters to build layers. Each element specifies the number of conv in + # each stage. For example, VGG11 contains 11 layers with learnable + # parameters. 11 is computed as 11 = (1 + 1 + 2 + 2 + 2) + 3, + # where 3 indicates the last three fully-connected layers. 
+ arch_settings = { + 11: (1, 1, 2, 2, 2), + 13: (2, 2, 2, 2, 2), + 16: (2, 2, 3, 3, 3), + 19: (2, 2, 4, 4, 4) + } + + def __init__(self, + depth, + num_classes=-1, + num_stages=5, + dilations=(1, 1, 1, 1, 1), + out_indices=None, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + norm_eval=False, + ceil_mode=False, + with_last_pool=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict(type='Constant', val=1., layer=['_BatchNorm']), + dict(type='Normal', std=0.01, layer=['Linear']) + ]): + super(VGG, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for vgg') + assert num_stages >= 1 and num_stages <= 5 + stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + assert len(dilations) == num_stages + + self.num_classes = num_classes + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + with_norm = norm_cfg is not None + + if out_indices is None: + out_indices = (5, ) if num_classes > 0 else (4, ) + assert max(out_indices) <= num_stages + self.out_indices = out_indices + + self.in_channels = 3 + start_idx = 0 + vgg_layers = [] + self.range_sub_modules = [] + for i, num_blocks in enumerate(self.stage_blocks): + num_modules = num_blocks + 1 + end_idx = start_idx + num_modules + dilation = dilations[i] + out_channels = 64 * 2**i if i < 4 else 512 + vgg_layer = make_vgg_layer( + self.in_channels, + out_channels, + num_blocks, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dilation=dilation, + with_norm=with_norm, + ceil_mode=ceil_mode) + vgg_layers.extend(vgg_layer) + self.in_channels = out_channels + self.range_sub_modules.append([start_idx, end_idx]) + start_idx = end_idx + if not with_last_pool: + vgg_layers.pop(-1) + self.range_sub_modules[-1][1] -= 1 + self.module_name = 'features' + self.add_module(self.module_name, nn.Sequential(*vgg_layers)) + + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + outs = [] + vgg_layers = getattr(self, self.module_name) + for i in range(len(self.stage_blocks)): + for j in range(*self.range_sub_modules[i]): + vgg_layer = vgg_layers[j] + x = vgg_layer(x) + if i in self.out_indices: + outs.append(x) + if self.num_classes > 0: + x = x.view(x.size(0), -1) + x = self.classifier(x) + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + vgg_layers = getattr(self, self.module_name) + for i in range(self.frozen_stages): + for j in range(*self.range_sub_modules[i]): + m = vgg_layers[j] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(VGG, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vision_transformer.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vision_transformer.py new file mode 100644 index 0000000000..acb8c4e26a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/backbones/vision_transformer.py @@ -0,0 +1,368 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
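+# Example usage (an illustrative sketch; ``arch='b'`` and the 224x224 input
+# match the defaults of the backbone defined below):
+#
+#     import torch
+#     from mmcls.models import VisionTransformer
+#     model = VisionTransformer(arch='b', img_size=224, patch_size=16)
+#     patch_token, cls_token = model(torch.rand(1, 3, 224, 224))[-1]
+#     # patch_token: (1, 768, 14, 14), cls_token: (1, 768)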
+from copy import deepcopy +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN +from mmcv.runner.base_module import BaseModule, ModuleList + +from mmcls.utils import get_root_logger +from ..builder import BACKBONES +from ..utils import MultiheadAttention, PatchEmbed, to_2tuple +from .base_backbone import BaseBackbone + + +class TransformerEncoderLayer(BaseModule): + """Implements one encoder layer in Vision Transformer. + + Args: + embed_dims (int): The feature dimension + num_heads (int): Parallel attention heads + feedforward_channels (int): The hidden dimension for FFNs + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + act_cfg (dict): The activation config for FFNs. + Defaluts to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(TransformerEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + self.attn = MultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias) + + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.embed_dims, postfix=2) + self.add_module(self.norm2_name, norm2) + + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def init_weights(self): + super(TransformerEncoderLayer, self).init_weights() + for m in self.ffn.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.normal_(m.bias, std=1e-6) + + def forward(self, x): + x = x + self.attn(self.norm1(x)) + x = self.ffn(self.norm2(x), identity=x) + return x + + +@BACKBONES.register_module() +class VisionTransformer(BaseBackbone): + """Vision Transformer. + + A PyTorch implement of : `An Image is Worth 16x16 Words: + Transformers for Image Recognition at + Scale`_ + + Args: + arch (str | dict): Vision Transformer architecture + Default: 'b' + img_size (int | tuple): Input image size + patch_size (int | tuple): The patch size + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. 
+ norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + output_cls_token (bool): Whether output the cls_token. If set True, + `with_cls_token` must be True. Defaults to True. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims': 768, + 'num_layers': 8, + 'num_heads': 8, + 'feedforward_channels': 768 * 3, + 'qkv_bias': False + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 3072 + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + }), + } + + def __init__(self, + arch='b', + img_size=224, + patch_size=16, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + output_cls_token=True, + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None): + super(VisionTransformer, self).__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.img_size = to_2tuple(img_size) + + # Set patch embedding + _patch_cfg = dict( + img_size=img_size, + embed_dims=self.embed_dims, + conv_cfg=dict( + type='Conv2d', kernel_size=patch_size, stride=patch_size), + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + num_patches = self.patch_embed.num_patches + + # Set cls token + self.output_cls_token = output_cls_token + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + # Set position embedding + self.interpolate_mode = interpolate_mode + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + 1, self.embed_dims)) + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.arch_settings['num_layers']) + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + embed_dims=self.embed_dims, + num_heads=self.arch_settings['num_heads'], + feedforward_channels=self. 
+ arch_settings['feedforward_channels'], + drop_rate=drop_rate, + drop_path_rate=dpr[i], + qkv_bias=self.arch_settings.get('qkv_bias', True), + norm_cfg=norm_cfg) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(TransformerEncoderLayer(**_layer_cfg)) + + self.final_norm = final_norm + if final_norm: + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def init_weights(self): + # Suppress default init if use pretrained model. + # And use custom load_checkpoint function to load checkpoint. + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + init_cfg = deepcopy(self.init_cfg) + init_cfg.pop('type') + self._load_checkpoint(**init_cfg) + else: + super(VisionTransformer, self).init_weights() + # Modified from ClassyVision + nn.init.normal_(self.pos_embed, std=0.02) + + def _load_checkpoint(self, checkpoint, prefix=None, map_location=None): + from mmcv.runner import (_load_checkpoint, + _load_checkpoint_with_prefix, load_state_dict) + from mmcv.utils import print_log + + logger = get_root_logger() + + if prefix is None: + print_log(f'load model from: {checkpoint}', logger=logger) + checkpoint = _load_checkpoint(checkpoint, map_location, logger) + # get state_dict from checkpoint + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + else: + print_log( + f'load {prefix} in model from: {checkpoint}', logger=logger) + state_dict = _load_checkpoint_with_prefix(prefix, checkpoint, + map_location) + + if 'pos_embed' in state_dict.keys(): + ckpt_pos_embed_shape = state_dict['pos_embed'].shape + if self.pos_embed.shape != ckpt_pos_embed_shape: + print_log( + f'Resize the pos_embed shape from {ckpt_pos_embed_shape} ' + f'to {self.pos_embed.shape}.', + logger=logger) + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - 1))) + pos_embed_shape = self.patch_embed.patches_resolution + + state_dict['pos_embed'] = self.resize_pos_embed( + state_dict['pos_embed'], ckpt_pos_embed_shape, + pos_embed_shape, self.interpolate_mode) + + # load state_dict + load_state_dict(self, state_dict, strict=False, logger=logger) + + @staticmethod + def resize_pos_embed(pos_embed, src_shape, dst_shape, mode='bicubic'): + """Resize pos_embed weights. + + Args: + pos_embed (torch.Tensor): Position embedding weights with shape + [1, L, C]. + src_shape (tuple): The resolution of downsampled origin training + image. + dst_shape (tuple): The resolution of downsampled new training + image. + mode (str): Algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. 
Default: ``'bicubic'`` + Return: + torch.Tensor: The resized pos_embed of shape [1, L_new, C] + """ + assert pos_embed.ndim == 3, 'shape of pos_embed must be [1, L, C]' + _, L, C = pos_embed.shape + src_h, src_w = src_shape + assert L == src_h * src_w + 1 + cls_token = pos_embed[:, :1] + + src_weight = pos_embed[:, 1:] + src_weight = src_weight.reshape(1, src_h, src_w, C).permute(0, 3, 1, 2) + + dst_weight = F.interpolate( + src_weight, size=dst_shape, align_corners=False, mode=mode) + dst_weight = torch.flatten(dst_weight, 2).transpose(1, 2) + + return torch.cat((cls_token, dst_weight), dim=1) + + def forward(self, x): + B = x.shape[0] + x = self.patch_embed(x) + patch_resolution = self.patch_embed.patches_resolution + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.norm1(x) + + if i in self.out_indices: + B, _, C = x.shape + patch_token = x[:, 1:].reshape(B, *patch_resolution, C) + patch_token = patch_token.permute(0, 3, 1, 2) + cls_token = x[:, 0] + if self.output_cls_token: + out = [patch_token, cls_token] + else: + out = patch_token + outs.append(out) + + return tuple(outs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/builder.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/builder.py new file mode 100644 index 0000000000..9b43913ef3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/builder.py @@ -0,0 +1,38 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import MODELS as MMCV_MODELS +from mmcv.cnn.bricks.registry import ATTENTION as MMCV_ATTENTION +from mmcv.utils import Registry + +MODELS = Registry('models', parent=MMCV_MODELS) + +BACKBONES = MODELS +NECKS = MODELS +HEADS = MODELS +LOSSES = MODELS +CLASSIFIERS = MODELS + +ATTENTION = Registry('attention', parent=MMCV_ATTENTION) + + +def build_backbone(cfg): + """Build backbone.""" + return BACKBONES.build(cfg) + + +def build_neck(cfg): + """Build neck.""" + return NECKS.build(cfg) + + +def build_head(cfg): + """Build head.""" + return HEADS.build(cfg) + + +def build_loss(cfg): + """Build loss.""" + return LOSSES.build(cfg) + + +def build_classifier(cfg): + return CLASSIFIERS.build(cfg) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/__init__.py new file mode 100644 index 0000000000..5fdfb91ff1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import BaseClassifier +from .image import ImageClassifier + +__all__ = ['BaseClassifier', 'ImageClassifier'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/base.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/base.py new file mode 100644 index 0000000000..5090245e59 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/base.py @@ -0,0 +1,215 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import warnings +from abc import ABCMeta, abstractmethod +from collections import OrderedDict + +import mmcv +import torch +import torch.distributed as dist +from mmcv.runner import BaseModule + +from mmcls.core.visualization import imshow_infos + +# TODO import `auto_fp16` from mmcv and delete them from mmcls +try: + from mmcv.runner import auto_fp16 +except ImportError: + warnings.warn('auto_fp16 from mmcls will be deprecated.' + 'Please install mmcv>=1.1.4.') + from mmcls.core import auto_fp16 + + +class BaseClassifier(BaseModule, metaclass=ABCMeta): + """Base class for classifiers.""" + + def __init__(self, init_cfg=None): + super(BaseClassifier, self).__init__(init_cfg) + self.fp16_enabled = False + + @property + def with_neck(self): + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_head(self): + return hasattr(self, 'head') and self.head is not None + + @abstractmethod + def extract_feat(self, imgs): + pass + + def extract_feats(self, imgs): + assert isinstance(imgs, list) + for img in imgs: + yield self.extract_feat(img) + + @abstractmethod + def forward_train(self, imgs, **kwargs): + """ + Args: + img (list[Tensor]): List of tensors of shape (1, C, H, W). + Typically these should be mean centered and std scaled. + kwargs (keyword arguments): Specific to concrete implementation. + """ + pass + + @abstractmethod + def simple_test(self, img, **kwargs): + pass + + def forward_test(self, imgs, **kwargs): + """ + Args: + imgs (List[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains all images in the batch. + """ + if isinstance(imgs, torch.Tensor): + imgs = [imgs] + for var, name in [(imgs, 'imgs')]: + if not isinstance(var, list): + raise TypeError(f'{name} must be a list, but got {type(var)}') + + if len(imgs) == 1: + return self.simple_test(imgs[0], **kwargs) + else: + raise NotImplementedError('aug_test has not been implemented') + + @auto_fp16(apply_to=('img', )) + def forward(self, img, return_loss=True, **kwargs): + """Calls either forward_train or forward_test depending on whether + return_loss=True. + + Note this setting will change the expected inputs. When + `return_loss=True`, img and img_meta are single-nested (i.e. Tensor and + List[dict]), and when `resturn_loss=False`, img and img_meta should be + double nested (i.e. List[Tensor], List[List[dict]]), with the outer + list indicating test time augmentations. 
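+
+        A minimal sketch of the two call modes (illustrative only; the exact
+        tensors and extra keyword arguments depend on the concrete subclass
+        and dataset pipeline):
+
+            # training: returns a dict of raw losses
+            losses = model(img, gt_label=gt_label, return_loss=True)
+            # testing: a single tensor (or a one-element list) is accepted
+            preds = model(img, img_metas=img_metas, return_loss=False)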
+ """ + if return_loss: + return self.forward_train(img, **kwargs) + else: + return self.forward_test(img, **kwargs) + + def _parse_losses(self, losses): + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + elif isinstance(loss_value, dict): + for name, value in loss_value.items(): + log_vars[name] = value + else: + raise TypeError( + f'{loss_name} is not a tensor or list of tensors') + + loss = sum(_value for _key, _value in log_vars.items() + if 'loss' in _key) + + log_vars['loss'] = loss + for loss_name, loss_value in log_vars.items(): + # reduce loss when distributed training + if dist.is_available() and dist.is_initialized(): + loss_value = loss_value.data.clone() + dist.all_reduce(loss_value.div_(dist.get_world_size())) + log_vars[loss_name] = loss_value.item() + + return loss, log_vars + + def train_step(self, data, optimizer): + """The iteration step during training. + + This method defines an iteration step during training, except for the + back propagation and optimizer updating, which are done in an optimizer + hook. Note that in some complicated cases or models, the whole process + including back propagation and optimizer updating are also defined in + this method, such as GAN. + + Args: + data (dict): The output of dataloader. + optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of + runner is passed to ``train_step()``. This argument is unused + and reserved. + + Returns: + dict: Dict of outputs. The following fields are contained. + - loss (torch.Tensor): A tensor for back propagation, which \ + can be a weighted sum of multiple losses. + - log_vars (dict): Dict contains all the variables to be sent \ + to the logger. + - num_samples (int): Indicates the batch size (when the model \ + is DDP, it means the batch size on each GPU), which is \ + used for averaging the logs. + """ + losses = self(**data) + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(data['img'].data)) + + return outputs + + def val_step(self, data, optimizer): + """The iteration step during validation. + + This method shares the same signature as :func:`train_step`, but used + during val epochs. Note that the evaluation after training epochs is + not implemented with this method, but an evaluation hook. + """ + losses = self(**data) + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(data['img'].data)) + + return outputs + + def show_result(self, + img, + result, + text_color='white', + font_scale=0.5, + row_width=20, + show=False, + fig_size=(15, 10), + win_name='', + wait_time=0, + out_file=None): + """Draw `result` over `img`. + + Args: + img (str or ndarray): The image to be displayed. + result (dict): The classification results to draw over `img`. + text_color (str or tuple or :obj:`Color`): Color of texts. + font_scale (float): Font scales of texts. + row_width (int): width between each row of results on the image. + show (bool): Whether to show the image. + Default: False. + fig_size (tuple): Image show figure size. Defaults to (15, 10). + win_name (str): The window name. + wait_time (int): How many seconds to display the image. + Defaults to 0. + out_file (str or None): The filename to write the image. + Default: None. 
+ + Returns: + img (ndarray): Image with overlaid results. + """ + img = mmcv.imread(img) + img = img.copy() + + img = imshow_infos( + img, + result, + text_color=text_color, + font_size=int(font_scale * 50), + row_width=row_width, + win_name=win_name, + show=show, + fig_size=fig_size, + wait_time=wait_time, + out_file=out_file) + + return img diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/image.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/image.py new file mode 100644 index 0000000000..5c2f5cefa4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/classifiers/image.py @@ -0,0 +1,141 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings + +from ..builder import CLASSIFIERS, build_backbone, build_head, build_neck +from ..utils.augment import Augments +from .base import BaseClassifier + +warnings.simplefilter('once') + + +@CLASSIFIERS.register_module() +class ImageClassifier(BaseClassifier): + + def __init__(self, + backbone, + neck=None, + head=None, + pretrained=None, + train_cfg=None, + init_cfg=None): + super(ImageClassifier, self).__init__(init_cfg) + + if pretrained is not None: + warnings.warn('DeprecationWarning: pretrained is a deprecated \ + key, please consider using init_cfg') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + + return_tuple = backbone.pop('return_tuple', True) + self.backbone = build_backbone(backbone) + if return_tuple is False: + warnings.warn( + 'The `return_tuple` is a temporary arg, we will force to ' + 'return tuple in the future. Please handle tuple in your ' + 'custom neck or head.', DeprecationWarning) + self.return_tuple = return_tuple + + if neck is not None: + self.neck = build_neck(neck) + + if head is not None: + self.head = build_head(head) + + self.augments = None + if train_cfg is not None: + augments_cfg = train_cfg.get('augments', None) + if augments_cfg is not None: + self.augments = Augments(augments_cfg) + else: + # Considering BC-breaking + mixup_cfg = train_cfg.get('mixup', None) + cutmix_cfg = train_cfg.get('cutmix', None) + assert mixup_cfg is None or cutmix_cfg is None, \ + 'If mixup and cutmix are set simultaneously,' \ + 'use augments instead.' + if mixup_cfg is not None: + warnings.warn('The mixup attribute will be deprecated. ' + 'Please use augments instead.') + cfg = copy.deepcopy(mixup_cfg) + cfg['type'] = 'BatchMixup' + # In the previous version, mixup_prob is always 1.0. + cfg['prob'] = 1.0 + self.augments = Augments(cfg) + if cutmix_cfg is not None: + warnings.warn('The cutmix attribute will be deprecated. ' + 'Please use augments instead.') + cfg = copy.deepcopy(cutmix_cfg) + cutmix_prob = cfg.pop('cutmix_prob') + cfg['type'] = 'BatchCutMix' + cfg['prob'] = cutmix_prob + self.augments = Augments(cfg) + + def extract_feat(self, img): + """Directly extract features from the backbone + neck.""" + x = self.backbone(img) + if self.return_tuple: + if not isinstance(x, tuple): + x = (x, ) + warnings.warn( + 'We will force all backbones to return a tuple in the ' + 'future. Please check your backbone and wrap the output ' + 'as a tuple.', DeprecationWarning) + else: + if isinstance(x, tuple): + x = x[-1] + if self.with_neck: + x = self.neck(x) + return x + + def forward_train(self, img, gt_label, **kwargs): + """Forward computation during training. + + Args: + img (Tensor): of shape (N, C, H, W) encoding input images. 
+                Typically these should be mean centered and std scaled.
+            gt_label (Tensor): It should be of shape (N, 1) encoding the
+                ground-truth label of input images for the single-label task,
+                and of shape (N, C) encoding the ground-truth label of input
+                images for the multi-label task.
+        Returns:
+            dict[str, Tensor]: a dictionary of loss components
+        """
+        if self.augments is not None:
+            img, gt_label = self.augments(img, gt_label)
+
+        x = self.extract_feat(img)
+
+        losses = dict()
+        try:
+            loss = self.head.forward_train(x, gt_label)
+        except TypeError as e:
+            if 'not tuple' in str(e) and self.return_tuple:
+                raise TypeError(
+                    'Seems the head cannot handle tuple input. We have '
+                    'changed all backbones\' output to a tuple. Please '
+                    'update your custom head\'s forward function. '
+                    'Temporarily, you can set "return_tuple=False" in '
+                    'your backbone config to disable this feature.')
+            raise e
+
+        losses.update(loss)
+
+        return losses
+
+    def simple_test(self, img, img_metas):
+        """Test without augmentation."""
+        x = self.extract_feat(img)
+
+        try:
+            res = self.head.simple_test(x)
+        except TypeError as e:
+            if 'not tuple' in str(e) and self.return_tuple:
+                raise TypeError(
+                    'Seems the head cannot handle tuple input. We have '
+                    'changed all backbones\' output to a tuple. Please '
+                    'update your custom head\'s forward function. '
+                    'Temporarily, you can set "return_tuple=False" in '
+                    'your backbone config to disable this feature.')
+            raise e
+
+        return res
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/__init__.py
new file mode 100644
index 0000000000..7711272a91
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .cls_head import ClsHead
+from .linear_head import LinearClsHead
+from .multi_label_head import MultiLabelClsHead
+from .multi_label_linear_head import MultiLabelLinearClsHead
+from .stacked_head import StackedLinearClsHead
+from .vision_transformer_head import VisionTransformerClsHead
+
+__all__ = [
+    'ClsHead', 'LinearClsHead', 'StackedLinearClsHead', 'MultiLabelClsHead',
+    'MultiLabelLinearClsHead', 'VisionTransformerClsHead'
+]
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/base_head.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/base_head.py
new file mode 100644
index 0000000000..e8936f28fe
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/base_head.py
@@ -0,0 +1,15 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from abc import ABCMeta, abstractmethod
+
+from mmcv.runner import BaseModule
+
+
+class BaseHead(BaseModule, metaclass=ABCMeta):
+    """Base head."""
+
+    def __init__(self, init_cfg=None):
+        super(BaseHead, self).__init__(init_cfg)
+
+    @abstractmethod
+    def forward_train(self, x, gt_label, **kwargs):
+        pass
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/cls_head.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/cls_head.py
new file mode 100644
index 0000000000..8ce1d28848
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/cls_head.py
@@ -0,0 +1,78 @@
+# Copyright (c) OpenMMLab. All rights reserved.
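The classifier above is normally built from a config rather than instantiated by hand. The sketch below is illustrative only: it assumes mmcls and its registries are importable as usual (`build_classifier`, the standard `ResNet` backbone, `GlobalAveragePooling` neck and `LinearClsHead`) and simply pushes random tensors through `forward_train` and `simple_test`.

import torch
from mmcls.models import build_classifier

cfg = dict(
    type='ImageClassifier',
    backbone=dict(
        type='ResNet', depth=50, num_stages=4, out_indices=(3, ),
        style='pytorch'),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearClsHead', num_classes=1000, in_channels=2048,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0), topk=(1, 5)))

model = build_classifier(cfg)
img = torch.randn(2, 3, 224, 224)          # dummy mini-batch
gt_label = torch.randint(0, 1000, (2, ))   # dummy class indices

losses = model.forward_train(img, gt_label)      # dict with a 'loss' entry
preds = model.simple_test(img, img_metas=None)   # per-image score arrays

The returned loss dict is what `_parse_losses` reduces inside `train_step`, while `simple_test` returns the post-processed per-image scores.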
+import torch +import torch.nn.functional as F + +from mmcls.models.losses import Accuracy +from ..builder import HEADS, build_loss +from ..utils import is_tracing +from .base_head import BaseHead + + +@HEADS.register_module() +class ClsHead(BaseHead): + """classification head. + + Args: + loss (dict): Config of classification loss. + topk (int | tuple): Top-k accuracy. + cal_acc (bool): Whether to calculate accuracy during training. + If you use Mixup/CutMix or something like that during training, + it is not reasonable to calculate accuracy. Defaults to False. + """ + + def __init__(self, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, ), + cal_acc=False, + init_cfg=None): + super(ClsHead, self).__init__(init_cfg=init_cfg) + + assert isinstance(loss, dict) + assert isinstance(topk, (int, tuple)) + if isinstance(topk, int): + topk = (topk, ) + for _topk in topk: + assert _topk > 0, 'Top-k should be larger than 0' + self.topk = topk + + self.compute_loss = build_loss(loss) + self.compute_accuracy = Accuracy(topk=self.topk) + self.cal_acc = cal_acc + + def loss(self, cls_score, gt_label): + num_samples = len(cls_score) + losses = dict() + # compute loss + loss = self.compute_loss(cls_score, gt_label, avg_factor=num_samples) + if self.cal_acc: + # compute accuracy + acc = self.compute_accuracy(cls_score, gt_label) + assert len(acc) == len(self.topk) + losses['accuracy'] = { + f'top-{k}': a + for k, a in zip(self.topk, acc) + } + losses['loss'] = loss + return losses + + def forward_train(self, cls_score, gt_label): + if isinstance(cls_score, tuple): + cls_score = cls_score[-1] + losses = self.loss(cls_score, gt_label) + return losses + + def simple_test(self, cls_score): + """Test without augmentation.""" + if isinstance(cls_score, tuple): + cls_score = cls_score[-1] + if isinstance(cls_score, list): + cls_score = sum(cls_score) / float(len(cls_score)) + pred = F.softmax(cls_score, dim=1) if cls_score is not None else None + return self.post_process(pred) + + def post_process(self, pred): + on_trace = is_tracing() + if torch.onnx.is_in_onnx_export() or on_trace: + return pred + pred = list(pred.detach().cpu().numpy()) + return pred diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/linear_head.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/linear_head.py new file mode 100644 index 0000000000..f355aee899 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/linear_head.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import HEADS +from .cls_head import ClsHead + + +@HEADS.register_module() +class LinearClsHead(ClsHead): + """Linear classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + init_cfg (dict | optional): The extra init config of layers. + Defaults to use dict(type='Normal', layer='Linear', std=0.01). 
+ """ + + def __init__(self, + num_classes, + in_channels, + init_cfg=dict(type='Normal', layer='Linear', std=0.01), + *args, + **kwargs): + super(LinearClsHead, self).__init__(init_cfg=init_cfg, *args, **kwargs) + + self.in_channels = in_channels + self.num_classes = num_classes + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def simple_test(self, x): + """Test without augmentation.""" + if isinstance(x, tuple): + x = x[-1] + cls_score = self.fc(x) + if isinstance(cls_score, list): + cls_score = sum(cls_score) / float(len(cls_score)) + pred = F.softmax(cls_score, dim=1) if cls_score is not None else None + + return self.post_process(pred) + + def forward_train(self, x, gt_label): + if isinstance(x, tuple): + x = x[-1] + cls_score = self.fc(x) + losses = self.loss(cls_score, gt_label) + return losses diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_head.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_head.py new file mode 100644 index 0000000000..f923cece8f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_head.py @@ -0,0 +1,64 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F + +from ..builder import HEADS, build_loss +from ..utils import is_tracing +from .base_head import BaseHead + + +@HEADS.register_module() +class MultiLabelClsHead(BaseHead): + """Classification head for multilabel task. + + Args: + loss (dict): Config of classification loss. + """ + + def __init__(self, + loss=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0), + init_cfg=None): + super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg) + + assert isinstance(loss, dict) + + self.compute_loss = build_loss(loss) + + def loss(self, cls_score, gt_label): + gt_label = gt_label.type_as(cls_score) + num_samples = len(cls_score) + losses = dict() + + # map difficult examples to positive ones + _gt_label = torch.abs(gt_label) + # compute loss + loss = self.compute_loss(cls_score, _gt_label, avg_factor=num_samples) + losses['loss'] = loss + return losses + + def forward_train(self, cls_score, gt_label): + if isinstance(cls_score, tuple): + cls_score = cls_score[-1] + gt_label = gt_label.type_as(cls_score) + losses = self.loss(cls_score, gt_label) + return losses + + def simple_test(self, x): + if isinstance(x, tuple): + x = x[-1] + if isinstance(x, list): + x = sum(x) / float(len(x)) + pred = F.sigmoid(x) if x is not None else None + + return self.post_process(pred) + + def post_process(self, pred): + on_trace = is_tracing() + if torch.onnx.is_in_onnx_export() or on_trace: + return pred + pred = list(pred.detach().cpu().numpy()) + return pred diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_linear_head.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_linear_head.py new file mode 100644 index 0000000000..396e8b1ddf --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/multi_label_linear_head.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch.nn as nn +import torch.nn.functional as F + +from ..builder import HEADS +from .multi_label_head import MultiLabelClsHead + + +@HEADS.register_module() +class MultiLabelLinearClsHead(MultiLabelClsHead): + """Linear classification head for multilabel task. + + Args: + num_classes (int): Number of categories. + in_channels (int): Number of channels in the input feature map. + loss (dict): Config of classification loss. + init_cfg (dict | optional): The extra init config of layers. + Defaults to use dict(type='Normal', layer='Linear', std=0.01). + """ + + def __init__(self, + num_classes, + in_channels, + loss=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0), + init_cfg=dict(type='Normal', layer='Linear', std=0.01)): + super(MultiLabelLinearClsHead, self).__init__( + loss=loss, init_cfg=init_cfg) + + if num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self.in_channels = in_channels + self.num_classes = num_classes + + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def forward_train(self, x, gt_label): + if isinstance(x, tuple): + x = x[-1] + gt_label = gt_label.type_as(x) + cls_score = self.fc(x) + losses = self.loss(cls_score, gt_label) + return losses + + def simple_test(self, x): + """Test without augmentation.""" + if isinstance(x, tuple): + x = x[-1] + cls_score = self.fc(x) + if isinstance(cls_score, list): + cls_score = sum(cls_score) / float(len(cls_score)) + pred = F.sigmoid(cls_score) if cls_score is not None else None + + return self.post_process(pred) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/stacked_head.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/stacked_head.py new file mode 100644 index 0000000000..d22a807226 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/stacked_head.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Sequence + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmcv.runner import BaseModule, ModuleList + +from ..builder import HEADS +from .cls_head import ClsHead + + +class LinearBlock(BaseModule): + + def __init__(self, + in_channels, + out_channels, + dropout_rate=0., + norm_cfg=None, + act_cfg=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.fc = nn.Linear(in_channels, out_channels) + + self.norm = None + self.act = None + self.dropout = None + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + if act_cfg is not None: + self.act = build_activation_layer(act_cfg) + if dropout_rate > 0: + self.dropout = nn.Dropout(p=dropout_rate) + + def forward(self, x): + x = self.fc(x) + if self.norm is not None: + x = self.norm(x) + if self.act is not None: + x = self.act(x) + if self.dropout is not None: + x = self.dropout(x) + return x + + +@HEADS.register_module() +class StackedLinearClsHead(ClsHead): + """Classifier head with several hidden fc layer and a output fc layer. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + mid_channels (Sequence): Number of channels in the hidden fc layers. + dropout_rate (float): Dropout rate after each hidden fc layer, + except the last layer. Defaults to 0. 
+ norm_cfg (dict, optional): Config dict of normalization layer after + each hidden fc layer, except the last layer. Defaults to None. + act_cfg (dict, optional): Config dict of activation function after each + hidden layer, except the last layer. Defaults to use "ReLU". + """ + + def __init__(self, + num_classes: int, + in_channels: int, + mid_channels: Sequence, + dropout_rate: float = 0., + norm_cfg: Dict = None, + act_cfg: Dict = dict(type='ReLU'), + **kwargs): + super(StackedLinearClsHead, self).__init__(**kwargs) + assert num_classes > 0, \ + f'`num_classes` of StackedLinearClsHead must be a positive ' \ + f'integer, got {num_classes} instead.' + self.num_classes = num_classes + + self.in_channels = in_channels + + assert isinstance(mid_channels, Sequence), \ + f'`mid_channels` of StackedLinearClsHead should be a sequence, ' \ + f'instead of {type(mid_channels)}' + self.mid_channels = mid_channels + + self.dropout_rate = dropout_rate + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self._init_layers() + + def _init_layers(self): + self.layers = ModuleList( + init_cfg=dict( + type='Normal', layer='Linear', mean=0., std=0.01, bias=0.)) + in_channels = self.in_channels + for hidden_channels in self.mid_channels: + self.layers.append( + LinearBlock( + in_channels, + hidden_channels, + dropout_rate=self.dropout_rate, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + in_channels = hidden_channels + + self.layers.append( + LinearBlock( + self.mid_channels[-1], + self.num_classes, + dropout_rate=0., + norm_cfg=None, + act_cfg=None)) + + def init_weights(self): + self.layers.init_weights() + + def simple_test(self, x): + """Test without augmentation.""" + if isinstance(x, tuple): + x = x[-1] + cls_score = x + for layer in self.layers: + cls_score = layer(cls_score) + if isinstance(cls_score, list): + cls_score = sum(cls_score) / float(len(cls_score)) + pred = F.softmax(cls_score, dim=1) if cls_score is not None else None + + return self.post_process(pred) + + def forward_train(self, x, gt_label): + if isinstance(x, tuple): + x = x[-1] + cls_score = x + for layer in self.layers: + cls_score = layer(cls_score) + losses = self.loss(cls_score, gt_label) + return losses diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/vision_transformer_head.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/vision_transformer_head.py new file mode 100644 index 0000000000..30fd3c58b3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/heads/vision_transformer_head.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner import Sequential + +from ..builder import HEADS +from .cls_head import ClsHead + + +@HEADS.register_module() +class VisionTransformerClsHead(ClsHead): + """Vision Transformer classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + hidden_dim (int): Number of the dimensions for hidden layer. Only + available during pre-training. Default None. + act_cfg (dict): The activation config. Only available during + pre-training. Defaults to Tanh. 
+ """ + + def __init__(self, + num_classes, + in_channels, + hidden_dim=None, + act_cfg=dict(type='Tanh'), + init_cfg=dict(type='Constant', layer='Linear', val=0), + *args, + **kwargs): + super(VisionTransformerClsHead, self).__init__( + init_cfg=init_cfg, *args, **kwargs) + self.in_channels = in_channels + self.num_classes = num_classes + self.hidden_dim = hidden_dim + self.act_cfg = act_cfg + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self._init_layers() + + def _init_layers(self): + if self.hidden_dim is None: + layers = [('head', nn.Linear(self.in_channels, self.num_classes))] + else: + layers = [ + ('pre_logits', nn.Linear(self.in_channels, self.hidden_dim)), + ('act', build_activation_layer(self.act_cfg)), + ('head', nn.Linear(self.hidden_dim, self.num_classes)), + ] + self.layers = Sequential(OrderedDict(layers)) + + def init_weights(self): + super(VisionTransformerClsHead, self).init_weights() + # Modified from ClassyVision + if hasattr(self.layers, 'pre_logits'): + # Lecun norm + trunc_normal_( + self.layers.pre_logits.weight, + std=math.sqrt(1 / self.layers.pre_logits.in_features)) + nn.init.zeros_(self.layers.pre_logits.bias) + + def simple_test(self, x): + """Test without augmentation.""" + x = x[-1] + _, cls_token = x + cls_score = self.layers(cls_token) + if isinstance(cls_score, list): + cls_score = sum(cls_score) / float(len(cls_score)) + pred = F.softmax(cls_score, dim=1) if cls_score is not None else None + + return self.post_process(pred) + + def forward_train(self, x, gt_label): + x = x[-1] + _, cls_token = x + cls_score = self.layers(cls_token) + losses = self.loss(cls_score, gt_label) + return losses diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/__init__.py new file mode 100644 index 0000000000..9c90086169 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .accuracy import Accuracy, accuracy +from .asymmetric_loss import AsymmetricLoss, asymmetric_loss +from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, + cross_entropy) +from .focal_loss import FocalLoss, sigmoid_focal_loss +from .label_smooth_loss import LabelSmoothLoss +from .seesaw_loss import SeesawLoss +from .utils import (convert_to_one_hot, reduce_loss, weight_reduce_loss, + weighted_loss) + +__all__ = [ + 'accuracy', 'Accuracy', 'asymmetric_loss', 'AsymmetricLoss', + 'cross_entropy', 'binary_cross_entropy', 'CrossEntropyLoss', 'reduce_loss', + 'weight_reduce_loss', 'LabelSmoothLoss', 'weighted_loss', 'FocalLoss', + 'sigmoid_focal_loss', 'convert_to_one_hot', 'SeesawLoss' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/accuracy.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/accuracy.py new file mode 100644 index 0000000000..3ecdbbecfa --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/accuracy.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from numbers import Number + +import numpy as np +import torch +import torch.nn as nn + + +def accuracy_numpy(pred, target, topk=1, thrs=0.): + if isinstance(thrs, Number): + thrs = (thrs, ) + res_single = True + elif isinstance(thrs, tuple): + res_single = False + else: + raise TypeError( + f'thrs should be a number or tuple, but got {type(thrs)}.') + + res = [] + maxk = max(topk) + num = pred.shape[0] + pred_label = pred.argsort(axis=1)[:, -maxk:][:, ::-1] + pred_score = np.sort(pred, axis=1)[:, -maxk:][:, ::-1] + + for k in topk: + correct_k = pred_label[:, :k] == target.reshape(-1, 1) + res_thr = [] + for thr in thrs: + # Only prediction values larger than thr are counted as correct + _correct_k = correct_k & (pred_score[:, :k] > thr) + _correct_k = np.logical_or.reduce(_correct_k, axis=1) + res_thr.append(_correct_k.sum() * 100. / num) + if res_single: + res.append(res_thr[0]) + else: + res.append(res_thr) + return res + + +def accuracy_torch(pred, target, topk=1, thrs=0.): + if isinstance(thrs, Number): + thrs = (thrs, ) + res_single = True + elif isinstance(thrs, tuple): + res_single = False + else: + raise TypeError( + f'thrs should be a number or tuple, but got {type(thrs)}.') + + res = [] + maxk = max(topk) + num = pred.size(0) + pred_score, pred_label = pred.topk(maxk, dim=1) + pred_label = pred_label.t() + correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) + for k in topk: + res_thr = [] + for thr in thrs: + # Only prediction values larger than thr are counted as correct + _correct = correct & (pred_score.t() > thr) + correct_k = _correct[:k].reshape(-1).float().sum(0, keepdim=True) + res_thr.append(correct_k.mul_(100. / num)) + if res_single: + res.append(res_thr[0]) + else: + res.append(res_thr) + return res + + +def accuracy(pred, target, topk=1, thrs=0.): + """Calculate accuracy according to the prediction and target. + + Args: + pred (torch.Tensor | np.array): The model prediction. + target (torch.Tensor | np.array): The target of each prediction + topk (int | tuple[int]): If the predictions in ``topk`` + matches the target, the predictions will be regarded as + correct ones. Defaults to 1. + thrs (Number | tuple[Number], optional): Predictions with scores under + the thresholds are considered negative. Default to 0. + + Returns: + float | list[float] | list[list[float]]: Accuracy + - float: If both ``topk`` and ``thrs`` is a single value. + - list[float]: If one of ``topk`` or ``thrs`` is a tuple. + - list[list[float]]: If both ``topk`` and ``thrs`` is a tuple. \ + And the first dim is ``topk``, the second dim is ``thrs``. + """ + assert isinstance(topk, (int, tuple)) + if isinstance(topk, int): + topk = (topk, ) + return_single = True + else: + return_single = False + + if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor): + res = accuracy_torch(pred, target, topk, thrs) + elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray): + res = accuracy_numpy(pred, target, topk, thrs) + else: + raise TypeError( + f'pred and target should both be torch.Tensor or np.ndarray, ' + f'but got {type(pred)} and {type(target)}.') + + return res[0] if return_single else res + + +class Accuracy(nn.Module): + + def __init__(self, topk=(1, )): + """Module to calculate the accuracy. + + Args: + topk (tuple): The criterion used to calculate the + accuracy. Defaults to (1,). + """ + super().__init__() + self.topk = topk + + def forward(self, pred, target): + """Forward function to calculate accuracy. 
+ + Args: + pred (torch.Tensor): Prediction of models. + target (torch.Tensor): Target for each prediction. + + Returns: + list[float]: The accuracies under different topk criterions. + """ + return accuracy(pred, target, self.topk) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/asymmetric_loss.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/asymmetric_loss.py new file mode 100644 index 0000000000..bc4aa1b411 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/asymmetric_loss.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +def asymmetric_loss(pred, + target, + weight=None, + gamma_pos=1.0, + gamma_neg=4.0, + clip=0.05, + reduction='mean', + avg_factor=None): + r"""asymmetric loss. + + Please refer to the `paper `__ for + details. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction with + shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, ). Defaults to None. + gamma_pos (float): positive focusing parameter. Defaults to 0.0. + gamma_neg (float): Negative focusing parameter. We usually set + gamma_neg > gamma_pos. Defaults to 4.0. + clip (float, optional): Probability margin. Defaults to 0.05. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". If reduction is 'none' , loss + is same shape as pred and label. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' + + eps = 1e-8 + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + + if clip and clip > 0: + pt = (1 - pred_sigmoid + + clip).clamp(max=1) * (1 - target) + pred_sigmoid * target + else: + pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target + asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * + (1 - target)) + loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight + if weight is not None: + assert weight.dim() == 1 + weight = weight.float() + if pred.dim() > 1: + weight = weight.reshape(-1, 1) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class AsymmetricLoss(nn.Module): + """asymmetric loss. + + Args: + gamma_pos (float): positive focusing parameter. + Defaults to 0.0. + gamma_neg (float): Negative focusing parameter. We + usually set gamma_neg > gamma_pos. Defaults to 4.0. + clip (float, optional): Probability margin. Defaults to 0.05. + reduction (str): The method used to reduce the loss into + a scalar. + loss_weight (float): Weight of loss. Defaults to 1.0. 
+ """ + + def __init__(self, + gamma_pos=0.0, + gamma_neg=4.0, + clip=0.05, + reduction='mean', + loss_weight=1.0): + super(AsymmetricLoss, self).__init__() + self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.clip = clip + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """asymmetric loss.""" + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_cls = self.loss_weight * asymmetric_loss( + pred, + target, + weight, + gamma_pos=self.gamma_pos, + gamma_neg=self.gamma_neg, + clip=self.clip, + reduction=reduction, + avg_factor=avg_factor) + return loss_cls diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/cross_entropy_loss.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000000..be9b8f0970 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/cross_entropy_loss.py @@ -0,0 +1,189 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +def cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None): + """Calculate the CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + label (torch.Tensor): The gt label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (torch.Tensor, optional): The weight for each class with + shape (C), C is the number of classes. Default None. + + Returns: + torch.Tensor: The calculated loss + """ + # element-wise losses + loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none') + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def soft_cross_entropy(pred, + label, + weight=None, + reduction='mean', + class_weight=None, + avg_factor=None): + """Calculate the Soft CrossEntropy loss. The label can be float. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + label (torch.Tensor): The gt label of the prediction with shape (N, C). + When using "mixup", the label can be float. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (torch.Tensor, optional): The weight for each class with + shape (C), C is the number of classes. Default None. 
+ + Returns: + torch.Tensor: The calculated loss + """ + # element-wise losses + loss = -label * F.log_softmax(pred, dim=-1) + if class_weight is not None: + loss *= class_weight + loss = loss.sum(dim=-1) + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None): + r"""Calculate the binary CrossEntropy loss with logits. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + label (torch.Tensor): The gt label with shape (N, \*). + weight (torch.Tensor, optional): Element-wise weight of loss with shape + (N, ). Defaults to None. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". If reduction is 'none' , loss + is same shape as pred and label. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (torch.Tensor, optional): The weight for each class with + shape (C), C is the number of classes. Default None. + + Returns: + torch.Tensor: The calculated loss + """ + assert pred.dim() == label.dim() + # Ensure that the size of class_weight is consistent with pred and label to + # avoid automatic boracast, + if class_weight is not None: + N = pred.size()[0] + class_weight = class_weight.repeat(N, 1) + loss = F.binary_cross_entropy_with_logits( + pred, label, weight=class_weight, reduction='none') + + # apply weights and do the reduction + if weight is not None: + assert weight.dim() == 1 + weight = weight.float() + if pred.dim() > 1: + weight = weight.reshape(-1, 1) + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + return loss + + +@LOSSES.register_module() +class CrossEntropyLoss(nn.Module): + """Cross entropy loss. + + Args: + use_sigmoid (bool): Whether the prediction uses sigmoid + of softmax. Defaults to False. + use_soft (bool): Whether to use the soft version of CrossEntropyLoss. + Defaults to False. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". Defaults to 'mean'. + loss_weight (float): Weight of the loss. Defaults to 1.0. + class_weight (List[float], optional): The weight for each class with + shape (C), C is the number of classes. Default None. 
+ """ + + def __init__(self, + use_sigmoid=False, + use_soft=False, + reduction='mean', + loss_weight=1.0, + class_weight=None): + super(CrossEntropyLoss, self).__init__() + self.use_sigmoid = use_sigmoid + self.use_soft = use_soft + assert not ( + self.use_soft and self.use_sigmoid + ), 'use_sigmoid and use_soft could not be set simultaneously' + + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = class_weight + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_soft: + self.cls_criterion = soft_cross_entropy + else: + self.cls_criterion = cross_entropy + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/focal_loss.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/focal_loss.py new file mode 100644 index 0000000000..441ab46ce5 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/focal_loss.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +def sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + r"""Sigmoid focal loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction with + shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, ). Defaults to None. + gamma (float): The gamma for calculating the modulating factor. + Defaults to 2.0. + alpha (float): A balanced form for Focal Loss. Defaults to 0.25. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". If reduction is 'none' , + loss is same shape as pred and label. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + if weight is not None: + assert weight.dim() == 1 + weight = weight.float() + if pred.dim() > 1: + weight = weight.reshape(-1, 1) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class FocalLoss(nn.Module): + """Focal loss. + + Args: + gamma (float): Focusing parameter in focal loss. + Defaults to 2.0. + alpha (float): The parameter in balanced form of focal + loss. Defaults to 0.25. + reduction (str): The method used to reduce the loss into + a scalar. 
Options are "none" and "mean". Defaults to 'mean'. + loss_weight (float): Weight of loss. Defaults to 1.0. + """ + + def __init__(self, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0): + + super(FocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + r"""Sigmoid focal loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction + with shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, \*). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss into a scalar. Options are "none", "mean" and "sum". + Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_cls = self.loss_weight * sigmoid_focal_loss( + pred, + target, + weight, + gamma=self.gamma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + return loss_cls diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/label_smooth_loss.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/label_smooth_loss.py new file mode 100644 index 0000000000..118230843e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/label_smooth_loss.py @@ -0,0 +1,167 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .cross_entropy_loss import CrossEntropyLoss +from .utils import convert_to_one_hot + + +@LOSSES.register_module() +class LabelSmoothLoss(nn.Module): + r"""Initializer for the label smoothed cross entropy loss. + + Refers to `Rethinking the Inception Architecture for Computer Vision + `_ + + This decreases gap between output scores and encourages generalization. + Labels provided to forward can be one-hot like vectors (NxC) or class + indices (Nx1). + And this accepts linear combination of one-hot like labels from mixup or + cutmix except multi-label task. + + Args: + label_smooth_val (float): The degree of label smoothing. + num_classes (int, optional): Number of classes. Defaults to None. + mode (str): Refers to notes, Options are 'original', 'classy_vision', + 'multi_label'. Defaults to 'classy_vision' + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". Defaults to 'mean'. + loss_weight (float): Weight of the loss. Defaults to 1.0. + + Notes: + if the mode is "original", this will use the same label smooth method + as the original paper as: + + .. math:: + (1-\epsilon)\delta_{k, y} + \frac{\epsilon}{K} + + where epsilon is the `label_smooth_val`, K is the num_classes and + delta(k,y) is Dirac delta, which equals 1 for k=y and 0 otherwise. + + if the mode is "classy_vision", this will use the same label smooth + method as the facebookresearch/ClassyVision repo as: + + .. math:: + \frac{\delta_{k, y} + \epsilon/K}{1+\epsilon} + + if the mode is "multi_label", this will accept labels from multi-label + task and smoothing them as: + + .. 
math:: + (1-2\epsilon)\delta_{k, y} + \epsilon + """ + + def __init__(self, + label_smooth_val, + num_classes=None, + mode=None, + reduction='mean', + loss_weight=1.0): + super().__init__() + self.num_classes = num_classes + self.loss_weight = loss_weight + + assert (isinstance(label_smooth_val, float) + and 0 <= label_smooth_val < 1), \ + f'LabelSmoothLoss accepts a float label_smooth_val ' \ + f'over [0, 1), but gets {label_smooth_val}' + self.label_smooth_val = label_smooth_val + + accept_reduction = {'none', 'mean', 'sum'} + assert reduction in accept_reduction, \ + f'LabelSmoothLoss supports reduction {accept_reduction}, ' \ + f'but gets {mode}.' + self.reduction = reduction + + if mode is None: + warnings.warn( + 'LabelSmoothLoss mode is not set, use "classy_vision" ' + 'by default. The default value will be changed to ' + '"original" recently. Please set mode manually if want ' + 'to keep "classy_vision".', UserWarning) + mode = 'classy_vision' + + accept_mode = {'original', 'classy_vision', 'multi_label'} + assert mode in accept_mode, \ + f'LabelSmoothLoss supports mode {accept_mode}, but gets {mode}.' + self.mode = mode + + self._eps = label_smooth_val + if mode == 'classy_vision': + self._eps = label_smooth_val / (1 + label_smooth_val) + if mode == 'multi_label': + self.ce = CrossEntropyLoss(use_sigmoid=True) + self.smooth_label = self.multilabel_smooth_label + else: + self.ce = CrossEntropyLoss(use_soft=True) + self.smooth_label = self.original_smooth_label + + def generate_one_hot_like_label(self, label): + """This function takes one-hot or index label vectors and computes one- + hot like label vectors (float)""" + # check if targets are inputted as class integers + if label.dim() == 1 or (label.dim() == 2 and label.shape[1] == 1): + label = convert_to_one_hot(label.view(-1, 1), self.num_classes) + return label.float() + + def original_smooth_label(self, one_hot_like_label): + assert self.num_classes > 0 + smooth_label = one_hot_like_label * (1 - self._eps) + smooth_label += self._eps / self.num_classes + return smooth_label + + def multilabel_smooth_label(self, one_hot_like_label): + assert self.num_classes > 0 + smooth_label = torch.full_like(one_hot_like_label, self._eps) + smooth_label.masked_fill_(one_hot_like_label > 0, 1 - self._eps) + return smooth_label + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + r"""Label smooth loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + label (torch.Tensor): The ground truth label of the prediction + with shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, \*). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss into a scalar. Options are "none", "mean" and "sum". + Defaults to None. + + Returns: + torch.Tensor: Loss. 
+ """ + if self.num_classes is not None: + assert self.num_classes == cls_score.shape[1], \ + f'num_classes should equal to cls_score.shape[1], ' \ + f'but got num_classes: {self.num_classes} and ' \ + f'cls_score.shape[1]: {cls_score.shape[1]}' + else: + self.num_classes = cls_score.shape[1] + + one_hot_like_label = self.generate_one_hot_like_label(label=label) + assert one_hot_like_label.shape == cls_score.shape, \ + f'LabelSmoothLoss requires output and target ' \ + f'to be same shape, but got output.shape: {cls_score.shape} ' \ + f'and target.shape: {one_hot_like_label.shape}' + + smoothed_label = self.smooth_label(one_hot_like_label) + return self.ce.forward( + cls_score, + smoothed_label, + weight=weight, + avg_factor=avg_factor, + reduction_override=reduction_override, + **kwargs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/seesaw_loss.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/seesaw_loss.py new file mode 100644 index 0000000000..14176de61d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/seesaw_loss.py @@ -0,0 +1,173 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# migrate from mmdetection with modifications +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +def seesaw_ce_loss(cls_score, + labels, + weight, + cum_samples, + num_classes, + p, + q, + eps, + reduction='mean', + avg_factor=None): + """Calculate the Seesaw CrossEntropy loss. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C), + C is the number of classes. + labels (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor): Sample-wise loss weight. + cum_samples (torch.Tensor): Cumulative samples for each category. + num_classes (int): The number of classes. + p (float): The ``p`` in the mitigation factor. + q (float): The ``q`` in the compenstation factor. + eps (float): The minimal value of divisor to smooth + the computation of compensation factor + reduction (str, optional): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. 
+ + Returns: + torch.Tensor: The calculated loss + """ + assert cls_score.size(-1) == num_classes + assert len(cum_samples) == num_classes + + onehot_labels = F.one_hot(labels, num_classes) + seesaw_weights = cls_score.new_ones(onehot_labels.size()) + + # mitigation factor + if p > 0: + sample_ratio_matrix = cum_samples[None, :].clamp( + min=1) / cum_samples[:, None].clamp(min=1) + index = (sample_ratio_matrix < 1.0).float() + sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index + ) # M_{ij} + mitigation_factor = sample_weights[labels.long(), :] + seesaw_weights = seesaw_weights * mitigation_factor + + # compensation factor + if q > 0: + scores = F.softmax(cls_score.detach(), dim=1) + self_scores = scores[ + torch.arange(0, len(scores)).to(scores.device).long(), + labels.long()] + score_matrix = scores / self_scores[:, None].clamp(min=eps) + index = (score_matrix > 1.0).float() + compensation_factor = score_matrix.pow(q) * index + (1 - index) + seesaw_weights = seesaw_weights * compensation_factor + + cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels)) + + loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none') + + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + return loss + + +@LOSSES.register_module() +class SeesawLoss(nn.Module): + """Implementation of seesaw loss. + + Refers to `Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021) + `_ + + Args: + use_sigmoid (bool): Whether the prediction uses sigmoid of softmax. + Only False is supported. Defaults to False. + p (float): The ``p`` in the mitigation factor. + Defaults to 0.8. + q (float): The ``q`` in the compenstation factor. + Defaults to 2.0. + num_classes (int): The number of classes. + Default to 1000 for the ImageNet dataset. + eps (float): The minimal value of divisor to smooth + the computation of compensation factor, default to 1e-2. + reduction (str): The method that reduces the loss to a scalar. + Options are "none", "mean" and "sum". Default to "mean". + loss_weight (float): The weight of the loss. Defaults to 1.0 + """ + + def __init__(self, + use_sigmoid=False, + p=0.8, + q=2.0, + num_classes=1000, + eps=1e-2, + reduction='mean', + loss_weight=1.0): + super(SeesawLoss, self).__init__() + assert not use_sigmoid, '`use_sigmoid` is not supported' + self.use_sigmoid = False + self.p = p + self.q = q + self.num_classes = num_classes + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + self.cls_criterion = seesaw_ce_loss + + # cumulative samples for each category + self.register_buffer('cum_samples', + torch.zeros(self.num_classes, dtype=torch.float)) + + def forward(self, + cls_score, + labels, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C). + labels (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + Returns: + torch.Tensor: The calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum'), \ + f'The `reduction_override` should be one of (None, "none", ' \ + f'"mean", "sum"), but get "{reduction_override}".' 
+ assert cls_score.size(0) == labels.view(-1).size(0), \ + f'Expected `labels` shape [{cls_score.size(0)}], ' \ + f'but got {list(labels.size())}' + reduction = ( + reduction_override if reduction_override else self.reduction) + assert cls_score.size(-1) == self.num_classes, \ + f'The channel number of output ({cls_score.size(-1)}) does ' \ + f'not match the `num_classes` of seesaw loss ({self.num_classes}).' + + # accumulate the samples for each category + unique_labels = labels.unique() + for u_l in unique_labels: + inds_ = labels == u_l.item() + self.cum_samples[u_l] += inds_.sum() + + if weight is not None: + weight = weight.float() + else: + weight = labels.new_ones(labels.size(), dtype=torch.float) + + # calculate loss_cls_classes + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, labels, weight, self.cum_samples, self.num_classes, + self.p, self.q, self.eps, reduction, avg_factor) + + return loss_cls diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/utils.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/utils.py new file mode 100644 index 0000000000..a33da4dc9a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/losses/utils.py @@ -0,0 +1,121 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools + +import torch +import torch.nn.functional as F + + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + +def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. + avg_factor (float): Average factor when computing the mean of losses. + + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + loss = loss.sum() / avg_factor + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + + +def weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + ``loss_func(pred, target, **kwargs)``. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like ``loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)``. 
+ + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) + >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, avg_factor=2) + tensor(1.5000) + """ + + @functools.wraps(loss_func) + def wrapper(pred, + target, + weight=None, + reduction='mean', + avg_factor=None, + **kwargs): + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + return wrapper + + +def convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor: + """This function converts target class indices to one-hot vectors, given + the number of classes. + + Args: + targets (Tensor): The ground truth label of the prediction + with shape (N, 1) + classes (int): the number of classes. + + Returns: + Tensor: Processed loss values. + """ + assert (torch.max(targets).item() < + classes), 'Class Index must be less than number of classes' + one_hot_targets = torch.zeros((targets.shape[0], classes), + dtype=torch.long, + device=targets.device) + one_hot_targets.scatter_(1, targets.long(), 1) + return one_hot_targets diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/__init__.py new file mode 100644 index 0000000000..67053fe681 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .gap import GlobalAveragePooling + +__all__ = ['GlobalAveragePooling'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/gap.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/gap.py new file mode 100644 index 0000000000..f64cce0ffd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/necks/gap.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from ..builder import NECKS + + +@NECKS.register_module() +class GlobalAveragePooling(nn.Module): + """Global Average Pooling neck. + + Note that we use `view` to remove extra channel after pooling. We do not + use `squeeze` as it will also remove the batch dimension when the tensor + has a batch dimension of size 1, which can lead to unexpected errors. + + Args: + dim (int): Dimensions of each sample channel, can be one of {1, 2, 3}. + Default: 2 + """ + + def __init__(self, dim=2): + super(GlobalAveragePooling, self).__init__() + assert dim in [1, 2, 3], 'GlobalAveragePooling dim only support ' \ + f'{1, 2, 3}, get {dim} instead.' 
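+        # `dim` selects the pooling operator: 1 -> AdaptiveAvgPool1d for
+        # (N, C, L) inputs, 2 -> AdaptiveAvgPool2d for (N, C, H, W) inputs,
+        # 3 -> AdaptiveAvgPool3d for (N, C, D, H, W) inputs.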
+ if dim == 1: + self.gap = nn.AdaptiveAvgPool1d(1) + elif dim == 2: + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + else: + self.gap = nn.AdaptiveAvgPool3d((1, 1, 1)) + + def init_weights(self): + pass + + def forward(self, inputs): + if isinstance(inputs, tuple): + outs = tuple([self.gap(x) for x in inputs]) + outs = tuple( + [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) + elif isinstance(inputs, torch.Tensor): + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + else: + raise TypeError('neck inputs should be tuple or torch.tensor') + return outs diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/__init__.py new file mode 100644 index 0000000000..69e9a4c133 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .attention import MultiheadAttention, ShiftWindowMSA +from .augment.augments import Augments +from .channel_shuffle import channel_shuffle +from .embed import HybridEmbed, PatchEmbed, PatchMerging +from .helpers import is_tracing, to_2tuple, to_3tuple, to_4tuple, to_ntuple +from .inverted_residual import InvertedResidual +from .make_divisible import make_divisible +from .se_layer import SELayer + +__all__ = [ + 'channel_shuffle', 'make_divisible', 'InvertedResidual', 'SELayer', + 'to_ntuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'PatchEmbed', + 'PatchMerging', 'HybridEmbed', 'Augments', 'ShiftWindowMSA', 'is_tracing', + 'MultiheadAttention' +] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/attention.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/attention.py new file mode 100644 index 0000000000..6595411295 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/attention.py @@ -0,0 +1,370 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks.registry import DROPOUT_LAYERS +from mmcv.cnn.bricks.transformer import build_dropout +from mmcv.cnn.utils.weight_init import trunc_normal_ +from mmcv.runner.base_module import BaseModule + +from ..builder import ATTENTION +from .helpers import to_2tuple + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + attn_drop (float, optional): Dropout ratio of attention weight. + Defaults to 0. + proj_drop (float, optional): Dropout ratio of output. Defaults to 0. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
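The `GlobalAveragePooling` neck above collapses each spatial feature map to one vector per sample; a minimal standalone illustration of the dim=2 path, with a made-up ResNet-50-like feature shape:

    import torch
    import torch.nn as nn

    feat = torch.randn(4, 2048, 7, 7)        # assumed stage-4 feature map, batch of 4
    gap = nn.AdaptiveAvgPool2d((1, 1))
    out = gap(feat).view(feat.size(0), -1)   # view keeps the batch dim, even if it is 1
    print(out.shape)                         # torch.Size([4, 2048])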
+ """ + + def __init__(self, + embed_dims, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0., + proj_drop=0., + init_cfg=None): + + super().__init__(init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # About 2x faster than original impl + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + super(WindowMSA, self).init_weights() + + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww, + Wh*Ww), value should be between (-inf, 0]. + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +@ATTENTION.register_module() +class ShiftWindowMSA(BaseModule): + """Shift Window Multihead Self-Attention Module. + + Args: + embed_dims (int): Number of input channels. + input_resolution (Tuple[int, int]): The resolution of the input feature + map. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. + shift_size (int, optional): The shift step of each window towards + right-bottom. If zero, act as regular window-msa. Defaults to 0. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Defaults to None. + attn_drop (float, optional): Dropout ratio of attention weight. + Defaults to 0.0. 
+ proj_drop (float, optional): Dropout ratio of output. Defaults to 0. + dropout_layer (dict, optional): The dropout_layer used before output. + Defaults to dict(type='DropPath', drop_prob=0.). + auto_pad (bool, optional): Auto pad the feature map to be divisible by + window_size, Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + input_resolution, + num_heads, + window_size, + shift_size=0, + qkv_bias=True, + qk_scale=None, + attn_drop=0, + proj_drop=0, + dropout_layer=dict(type='DropPath', drop_prob=0.), + auto_pad=False, + init_cfg=None): + super().__init__(init_cfg) + + self.embed_dims = embed_dims + self.input_resolution = input_resolution + self.shift_size = shift_size + self.window_size = window_size + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, don't partition + self.shift_size = 0 + self.window_size = min(self.input_resolution) + + self.w_msa = WindowMSA(embed_dims, to_2tuple(self.window_size), + num_heads, qkv_bias, qk_scale, attn_drop, + proj_drop) + + self.drop = build_dropout(dropout_layer) + + H, W = self.input_resolution + # Handle auto padding + self.auto_pad = auto_pad + if self.auto_pad: + self.pad_r = (self.window_size - + W % self.window_size) % self.window_size + self.pad_b = (self.window_size - + H % self.window_size) % self.window_size + self.H_pad = H + self.pad_b + self.W_pad = W + self.pad_r + else: + H_pad, W_pad = self.input_resolution + assert H_pad % self.window_size + W_pad % self.window_size == 0,\ + f'input_resolution({self.input_resolution}) is not divisible '\ + f'by window_size({self.window_size}). Please check feature '\ + f'map shape or set `auto_pad=True`.' + self.H_pad, self.W_pad = H_pad, W_pad + self.pad_r, self.pad_b = 0, 0 + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, self.H_pad, self.W_pad, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = self.window_partition(img_mask) + mask_windows = mask_windows.view( + -1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-100.0)).masked_fill( + attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer('attn_mask', attn_mask) + + def forward(self, query): + H, W = self.input_resolution + B, L, C = query.shape + assert L == H * W, 'input feature has wrong size' + query = query.view(B, H, W, C) + + if self.pad_r or self.pad_b: + query = F.pad(query, (0, 0, 0, self.pad_r, 0, self.pad_b)) + + # cyclic shift + if self.shift_size > 0: + shifted_query = torch.roll( + query, + shifts=(-self.shift_size, -self.shift_size), + dims=(1, 2)) + else: + shifted_query = query + + # nW*B, window_size, window_size, C + query_windows = self.window_partition(shifted_query) + # nW*B, window_size*window_size, C + query_windows = query_windows.view(-1, self.window_size**2, C) + + # W-MSA/SW-MSA (nW*B, window_size*window_size, C) + attn_windows = self.w_msa(query_windows, mask=self.attn_mask) + + # merge windows + attn_windows = 
attn_windows.view(-1, self.window_size, + self.window_size, C) + + # B H' W' C + shifted_x = self.window_reverse(attn_windows, self.H_pad, self.W_pad) + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + dims=(1, 2)) + else: + x = shifted_x + + if self.pad_r or self.pad_b: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = self.drop(x) + return x + + def window_reverse(self, windows, H, W): + window_size = self.window_size + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + def window_partition(self, x): + B, H, W, C = x.shape + window_size = self.window_size + x = x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + +class MultiheadAttention(BaseModule): + """Multi-head Attention Module. + + This module implements multi-head attention that supports different input + dims and embed dims. And it also supports a shortcut from ``value``, which + is useful if input dims is not the same with embed dims. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + input_dims (int, optional): The input dimension, and if None, + use ``embed_dims``. Defaults to None. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + dropout_layer (dict): The dropout config before adding the shortcut. + Defaults to ``dict(type='Dropout', drop_prob=0.)``. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + proj_bias (bool) If True, add a learnable bias to output projection. + Defaults to True. + v_shortcut (bool): Add a shortcut from value to output. It's usually + used if ``input_dims`` is different from ``embed_dims``. + Defaults to False. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
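`window_partition` and `window_reverse` above are pure reshape/permute inverses; a standalone round-trip check with made-up sizes (logic mirrored from the two methods, written as free functions for brevity):

    import torch

    def window_partition(x, ws):
        B, H, W, C = x.shape
        x = x.view(B, H // ws, ws, W // ws, ws, C)
        return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, ws, ws, C)

    def window_reverse(windows, ws, H, W):
        B = int(windows.shape[0] / (H * W / ws / ws))
        x = windows.view(B, H // ws, W // ws, ws, ws, -1)
        return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)

    x = torch.randn(2, 14, 14, 96)           # (B, H, W, C), assumed values
    wins = window_partition(x, 7)            # (2 * 2 * 2, 7, 7, 96) = (8, 7, 7, 96)
    assert torch.equal(window_reverse(wins, 7, 14, 14), x)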
+ """ + + def __init__(self, + embed_dims, + num_heads, + input_dims=None, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='Dropout', drop_prob=0.), + qkv_bias=True, + qk_scale=None, + proj_bias=True, + v_shortcut=False, + init_cfg=None): + super(MultiheadAttention, self).__init__(init_cfg=init_cfg) + + self.input_dims = input_dims or embed_dims + self.embed_dims = embed_dims + self.num_heads = num_heads + self.v_shortcut = v_shortcut + + self.head_dims = embed_dims // num_heads + self.scale = qk_scale or self.head_dims**-0.5 + + self.qkv = nn.Linear(self.input_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims, bias=proj_bias) + self.proj_drop = nn.Dropout(proj_drop) + + self.out_drop = DROPOUT_LAYERS.build(dropout_layer) + + def forward(self, x): + B, N, _ = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + self.head_dims).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, self.embed_dims) + x = self.proj(x) + x = self.out_drop(self.proj_drop(x)) + + if self.v_shortcut: + x = v.squeeze(1) + x + return x diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/__init__.py new file mode 100644 index 0000000000..b64cce38a0 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .augments import Augments +from .cutmix import BatchCutMixLayer +from .identity import Identity +from .mixup import BatchMixupLayer + +__all__ = ['Augments', 'BatchCutMixLayer', 'Identity', 'BatchMixupLayer'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/augments.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/augments.py new file mode 100644 index 0000000000..8455e935dd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/augments.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random + +import numpy as np + +from .builder import build_augment + + +class Augments(object): + """Data augments. + + We implement some data augmentation methods, such as mixup, cutmix. + + Args: + augments_cfg (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict`): + Config dict of augments + + Example: + >>> augments_cfg = [ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3) + ] + >>> augments = Augments(augments_cfg) + >>> imgs = torch.randn(16, 3, 32, 32) + >>> label = torch.randint(0, 10, (16, )) + >>> imgs, label = augments(imgs, label) + + To decide which augmentation within Augments block is used + the following rule is applied. + We pick augmentation based on the probabilities. In the example above, + we decide if we should use BatchCutMix with probability 0.5, + BatchMixup 0.3. As Identity is not in augments_cfg, we use Identity with + probability 1 - 0.5 - 0.3 = 0.2. 
+ """ + + def __init__(self, augments_cfg): + super(Augments, self).__init__() + + if isinstance(augments_cfg, dict): + augments_cfg = [augments_cfg] + + assert len(augments_cfg) > 0, \ + 'The length of augments_cfg should be positive.' + self.augments = [build_augment(cfg) for cfg in augments_cfg] + self.augment_probs = [aug.prob for aug in self.augments] + + has_identity = any([cfg['type'] == 'Identity' for cfg in augments_cfg]) + if has_identity: + assert sum(self.augment_probs) == 1.0,\ + 'The sum of augmentation probabilities should equal to 1,' \ + ' but got {:.2f}'.format(sum(self.augment_probs)) + else: + assert sum(self.augment_probs) <= 1.0,\ + 'The sum of augmentation probabilities should less than or ' \ + 'equal to 1, but got {:.2f}'.format(sum(self.augment_probs)) + identity_prob = 1 - sum(self.augment_probs) + if identity_prob > 0: + num_classes = self.augments[0].num_classes + self.augments += [ + build_augment( + dict( + type='Identity', + num_classes=num_classes, + prob=identity_prob)) + ] + self.augment_probs += [identity_prob] + + def __call__(self, img, gt_label): + if self.augments: + random_state = np.random.RandomState(random.randint(0, 2**32 - 1)) + aug = random_state.choice(self.augments, p=self.augment_probs) + return aug(img, gt_label) + return img, gt_label diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/builder.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/builder.py new file mode 100644 index 0000000000..5d1205ee31 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/builder.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import Registry, build_from_cfg + +AUGMENT = Registry('augment') + + +def build_augment(cfg, default_args=None): + return build_from_cfg(cfg, AUGMENT, default_args) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/cutmix.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/cutmix.py new file mode 100644 index 0000000000..215e878d01 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/cutmix.py @@ -0,0 +1,140 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import numpy as np +import torch +import torch.nn.functional as F + +from .builder import AUGMENT + + +class BaseCutMixLayer(object, metaclass=ABCMeta): + """Base class for CutMixLayer. + + Args: + alpha (float): Parameters for Beta distribution. Positive(>0) + num_classes (int): The number of classes + prob (float): MixUp probability. It should be in range [0, 1]. + Default to 1.0 + cutmix_minmax (List[float], optional): cutmix min/max image ratio. + (as percent of image size). When cutmix_minmax is not None, we + generate cutmix bounding-box using cutmix_minmax instead of alpha + correct_lam (bool): Whether to apply lambda correction when cutmix bbox + clipped by image borders. 
Default to True + """ + + def __init__(self, + alpha, + num_classes, + prob=1.0, + cutmix_minmax=None, + correct_lam=True): + super(BaseCutMixLayer, self).__init__() + + assert isinstance(alpha, float) and alpha > 0 + assert isinstance(num_classes, int) + assert isinstance(prob, float) and 0.0 <= prob <= 1.0 + + self.alpha = alpha + self.num_classes = num_classes + self.prob = prob + self.cutmix_minmax = cutmix_minmax + self.correct_lam = correct_lam + + def rand_bbox_minmax(self, img_shape, count=None): + """Min-Max CutMix bounding-box Inspired by Darknet cutmix + implementation. It generates a random rectangular bbox based on min/max + percent values applied to each dimension of the input image. + + Typical defaults for minmax are usually in the .2-.3 for min and + .8-.9 range for max. + + Args: + img_shape (tuple): Image shape as tuple + count (int, optional): Number of bbox to generate. Default to None + """ + assert len(self.cutmix_minmax) == 2 + img_h, img_w = img_shape[-2:] + cut_h = np.random.randint( + int(img_h * self.cutmix_minmax[0]), + int(img_h * self.cutmix_minmax[1]), + size=count) + cut_w = np.random.randint( + int(img_w * self.cutmix_minmax[0]), + int(img_w * self.cutmix_minmax[1]), + size=count) + yl = np.random.randint(0, img_h - cut_h, size=count) + xl = np.random.randint(0, img_w - cut_w, size=count) + yu = yl + cut_h + xu = xl + cut_w + return yl, yu, xl, xu + + def rand_bbox(self, img_shape, lam, margin=0., count=None): + """Standard CutMix bounding-box that generates a random square bbox + based on lambda value. This implementation includes support for + enforcing a border margin as percent of bbox dimensions. + + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + margin (float): Percentage of bbox dimension to enforce as margin + (reduce amount of box outside image). Default to 0. + count (int, optional): Number of bbox to generate. Default to None + """ + ratio = np.sqrt(1 - lam) + img_h, img_w = img_shape[-2:] + cut_h, cut_w = int(img_h * ratio), int(img_w * ratio) + margin_y, margin_x = int(margin * cut_h), int(margin * cut_w) + cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) + cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) + yl = np.clip(cy - cut_h // 2, 0, img_h) + yh = np.clip(cy + cut_h // 2, 0, img_h) + xl = np.clip(cx - cut_w // 2, 0, img_w) + xh = np.clip(cx + cut_w // 2, 0, img_w) + return yl, yh, xl, xh + + def cutmix_bbox_and_lam(self, img_shape, lam, count=None): + """Generate bbox and apply lambda correction. + + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + count (int, optional): Number of bbox to generate. Default to None + """ + if self.cutmix_minmax is not None: + yl, yu, xl, xu = self.rand_bbox_minmax(img_shape, count=count) + else: + yl, yu, xl, xu = self.rand_bbox(img_shape, lam, count=count) + if self.correct_lam or self.cutmix_minmax is not None: + bbox_area = (yu - yl) * (xu - xl) + lam = 1. 
- bbox_area / float(img_shape[-2] * img_shape[-1]) + return (yl, yu, xl, xu), lam + + @abstractmethod + def cutmix(self, imgs, gt_label): + pass + + +@AUGMENT.register_module(name='BatchCutMix') +class BatchCutMixLayer(BaseCutMixLayer): + """CutMix layer for batch CutMix.""" + + def __init__(self, *args, **kwargs): + super(BatchCutMixLayer, self).__init__(*args, **kwargs) + + def cutmix(self, img, gt_label): + one_hot_gt_label = F.one_hot(gt_label, num_classes=self.num_classes) + lam = np.random.beta(self.alpha, self.alpha) + batch_size = img.size(0) + index = torch.randperm(batch_size) + + (bby1, bby2, bbx1, + bbx2), lam = self.cutmix_bbox_and_lam(img.shape, lam) + img[:, :, bby1:bby2, bbx1:bbx2] = \ + img[index, :, bby1:bby2, bbx1:bbx2] + mixed_gt_label = lam * one_hot_gt_label + ( + 1 - lam) * one_hot_gt_label[index, :] + return img, mixed_gt_label + + def __call__(self, img, gt_label): + return self.cutmix(img, gt_label) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/identity.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/identity.py new file mode 100644 index 0000000000..e676fc4223 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/identity.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn.functional as F + +from .builder import AUGMENT + + +@AUGMENT.register_module(name='Identity') +class Identity(object): + """Change gt_label to one_hot encoding and keep img as the same. + + Args: + num_classes (int): The number of classes. + prob (float): MixUp probability. It should be in range [0, 1]. + Default to 1.0 + """ + + def __init__(self, num_classes, prob=1.0): + super(Identity, self).__init__() + + assert isinstance(num_classes, int) + assert isinstance(prob, float) and 0.0 <= prob <= 1.0 + + self.num_classes = num_classes + self.prob = prob + + def one_hot(self, gt_label): + return F.one_hot(gt_label, num_classes=self.num_classes) + + def __call__(self, img, gt_label): + return img, self.one_hot(gt_label) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/mixup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/mixup.py new file mode 100644 index 0000000000..2d6cd2b534 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/augment/mixup.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + +import numpy as np +import torch +import torch.nn.functional as F + +from .builder import AUGMENT + + +class BaseMixupLayer(object, metaclass=ABCMeta): + """Base class for MixupLayer. + + Args: + alpha (float): Parameters for Beta distribution. + num_classes (int): The number of classes. + prob (float): MixUp probability. It should be in range [0, 1]. 
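For intuition on the CutMix geometry above, here is a standalone NumPy sketch of `rand_bbox` (with margin=0 and no `count`) followed by the `correct_lam` adjustment from `cutmix_bbox_and_lam`; the image shape and alpha are made up:

    import numpy as np

    img_h, img_w = 224, 224
    lam = np.random.beta(1.0, 1.0)           # assumed alpha = 1.0

    ratio = np.sqrt(1 - lam)
    cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
    cy, cx = np.random.randint(0, img_h), np.random.randint(0, img_w)
    yl, yh = np.clip(cy - cut_h // 2, 0, img_h), np.clip(cy + cut_h // 2, 0, img_h)
    xl, xh = np.clip(cx - cut_w // 2, 0, img_w), np.clip(cx + cut_w // 2, 0, img_w)

    # correct_lam=True: recompute lambda from the clipped box area
    lam_corrected = 1.0 - (yh - yl) * (xh - xl) / float(img_h * img_w)
    print(round(float(lam), 3), round(float(lam_corrected), 3))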
+ Default to 1.0 + """ + + def __init__(self, alpha, num_classes, prob=1.0): + super(BaseMixupLayer, self).__init__() + + assert isinstance(alpha, float) and alpha > 0 + assert isinstance(num_classes, int) + assert isinstance(prob, float) and 0.0 <= prob <= 1.0 + + self.alpha = alpha + self.num_classes = num_classes + self.prob = prob + + @abstractmethod + def mixup(self, imgs, gt_label): + pass + + +@AUGMENT.register_module(name='BatchMixup') +class BatchMixupLayer(BaseMixupLayer): + """Mixup layer for batch mixup.""" + + def __init__(self, *args, **kwargs): + super(BatchMixupLayer, self).__init__(*args, **kwargs) + + def mixup(self, img, gt_label): + one_hot_gt_label = F.one_hot(gt_label, num_classes=self.num_classes) + lam = np.random.beta(self.alpha, self.alpha) + batch_size = img.size(0) + index = torch.randperm(batch_size) + + mixed_img = lam * img + (1 - lam) * img[index, :] + mixed_gt_label = lam * one_hot_gt_label + ( + 1 - lam) * one_hot_gt_label[index, :] + + return mixed_img, mixed_gt_label + + def __call__(self, img, gt_label): + return self.mixup(img, gt_label) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/channel_shuffle.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/channel_shuffle.py new file mode 100644 index 0000000000..27006a8065 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/channel_shuffle.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def channel_shuffle(x, groups): + """Channel Shuffle operation. + + This function enables cross-group information flow for multiple groups + convolution layers. + + Args: + x (Tensor): The input tensor. + groups (int): The number of groups to divide the input tensor + in the channel dimension. + + Returns: + Tensor: The output tensor after channel shuffle operation. + """ + + batch_size, num_channels, height, width = x.size() + assert (num_channels % groups == 0), ('num_channels should be ' + 'divisible by groups') + channels_per_group = num_channels // groups + + x = x.view(batch_size, groups, channels_per_group, height, width) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(batch_size, -1, height, width) + + return x diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/embed.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/embed.py new file mode 100644 index 0000000000..b5f7be2763 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/embed.py @@ -0,0 +1,253 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.runner.base_module import BaseModule + +from .helpers import to_2tuple + + +class PatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + img_size (int | tuple): The size of input image. Default: 224 + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None + conv_cfg (dict, optional): The config dict for conv layers. + Default: None + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. 
+ Default: None + """ + + def __init__(self, + img_size=224, + in_channels=3, + embed_dims=768, + norm_cfg=None, + conv_cfg=None, + init_cfg=None): + super(PatchEmbed, self).__init__(init_cfg) + + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + self.img_size = img_size + self.embed_dims = embed_dims + + # Use conv layer to embed + conv_cfg = conv_cfg or dict() + _conv_cfg = dict( + type='Conv2d', kernel_size=16, stride=16, padding=0, dilation=1) + _conv_cfg.update(conv_cfg) + self.projection = build_conv_layer(_conv_cfg, in_channels, embed_dims) + + # Calculate how many patches a input image is splited to. + h_out, w_out = [(self.img_size[i] + 2 * self.projection.padding[i] - + self.projection.dilation[i] * + (self.projection.kernel_size[i] - 1) - 1) // + self.projection.stride[i] + 1 for i in range(2)] + + self.patches_resolution = (h_out, w_out) + self.num_patches = h_out * w_out + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't " \ + f'match model ({self.img_size[0]}*{self.img_size[1]}).' + # The output size is (B, N, D), where N=H*W/P/P, D is embid_dim + x = self.projection(x).flatten(2).transpose(1, 2) + + if self.norm is not None: + x = self.norm(x) + + return x + + +# Modified from pytorch-image-models +class HybridEmbed(BaseModule): + """CNN Feature Map Embedding. + + Extract feature map from CNN, flatten, + project to embedding dim. + + Args: + backbone (nn.Module): CNN backbone + img_size (int | tuple): The size of input image. Default: 224 + feature_size (int | tuple, optional): Size of feature map extracted by + CNN backbone. Default: None + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_cfg (dict, optional): The config dict for conv layers. + Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. + """ + + def __init__(self, + backbone, + img_size=224, + feature_size=None, + in_channels=3, + embed_dims=768, + conv_cfg=None, + init_cfg=None): + super(HybridEmbed, self).__init__(init_cfg) + assert isinstance(backbone, nn.Module) + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + self.img_size = img_size + self.backbone = backbone + if feature_size is None: + with torch.no_grad(): + # FIXME this is hacky, but most reliable way of + # determining the exact dim of the output feature + # map for all networks, the feature metadata has + # reliable channel and stride info, but using + # stride to calc feature dim requires info about padding of + # each stage that isn't captured. 
+ training = backbone.training + if training: + backbone.eval() + o = self.backbone( + torch.zeros(1, in_channels, img_size[0], img_size[1])) + if isinstance(o, (list, tuple)): + # last feature if backbone outputs list/tuple of features + o = o[-1] + feature_size = o.shape[-2:] + feature_dim = o.shape[1] + backbone.train(training) + else: + feature_size = to_2tuple(feature_size) + if hasattr(self.backbone, 'feature_info'): + feature_dim = self.backbone.feature_info.channels()[-1] + else: + feature_dim = self.backbone.num_features + self.num_patches = feature_size[0] * feature_size[1] + + # Use conv layer to embed + conv_cfg = conv_cfg or dict() + _conv_cfg = dict( + type='Conv2d', kernel_size=1, stride=1, padding=0, dilation=1) + _conv_cfg.update(conv_cfg) + self.projection = build_conv_layer(_conv_cfg, feature_dim, embed_dims) + + def forward(self, x): + x = self.backbone(x) + if isinstance(x, (list, tuple)): + # last feature if backbone outputs list/tuple of features + x = x[-1] + x = self.projection(x).flatten(2).transpose(1, 2) + return x + + +class PatchMerging(BaseModule): + """Merge patch feature map. + + This layer use nn.Unfold to group feature map by kernel_size, and use norm + and linear layer to embed grouped feature map. + + Args: + input_resolution (tuple): The size of input patch resolution. + in_channels (int): The num of input channels. + expansion_ratio (Number): Expansion ratio of output channels. The num + of output channels is equal to int(expansion_ratio * in_channels). + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Defaults to be equal with kernel_size. + padding (int | tuple, optional): zero padding width in the unfold + layer. Defaults to 0. + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Defaults to 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults to False. + norm_cfg (dict, optional): Config dict for normalization layer. + Defaults to dict(type='LN'). + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
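The patch-grid arithmetic used by `PatchEmbed` above is just the standard convolution output-size formula; for a 224x224 input with a 16x16, stride-16 projection (values chosen only for illustration) it gives a 14x14 grid:

    img_size, kernel_size, stride, padding, dilation = 224, 16, 16, 0, 1

    h_out = (img_size + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1
    print(h_out, h_out * h_out)              # 14 patches per side, 196 patches in total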
+ """ + + def __init__(self, + input_resolution, + in_channels, + expansion_ratio, + kernel_size=2, + stride=None, + padding=0, + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + init_cfg=None): + super().__init__(init_cfg) + H, W = input_resolution + self.input_resolution = input_resolution + self.in_channels = in_channels + self.out_channels = int(expansion_ratio * in_channels) + + if stride is None: + stride = kernel_size + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + padding = to_2tuple(padding) + dilation = to_2tuple(dilation) + self.sampler = nn.Unfold(kernel_size, dilation, padding, stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + self.reduction = nn.Linear(sample_dim, self.out_channels, bias=bias) + + # See https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + H_out = (H + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + W_out = (W + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.output_resolution = (H_out, W_out) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + x = self.sampler(x) # B, 4*C, H/2*W/2 + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + + x = self.norm(x) if self.norm else x + x = self.reduction(x) + + return x diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/helpers.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/helpers.py new file mode 100644 index 0000000000..d1c79c404f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/helpers.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import collections.abc +import warnings +from distutils.version import LooseVersion +from itertools import repeat + +import torch + + +def is_tracing() -> bool: + if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'): + on_trace = torch.jit.is_tracing() + # In PyTorch 1.6, torch.jit.is_tracing has a bug. + # Refers to https://github.com/pytorch/pytorch/issues/42448 + if isinstance(on_trace, bool): + return on_trace + else: + return torch._C._is_tracing() + else: + warnings.warn( + 'torch.jit.is_tracing is only supported after v1.6.0. ' + 'Therefore is_tracing returns False automatically. Please ' + 'set on_trace manually if you are using trace.', UserWarning) + return False + + +# From PyTorch internals +def _ntuple(n): + + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/inverted_residual.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/inverted_residual.py new file mode 100644 index 0000000000..d2e9fba6e3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/inverted_residual.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from .se_layer import SELayer + + +# class InvertedResidual(nn.Module): +class InvertedResidual(BaseModule): + """Inverted Residual Block. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + mid_channels (int): The input channels of the depthwise convolution. + kernel_size (int): The kernel size of the depthwise convolution. + Default: 3. + stride (int): The stride of the depthwise convolution. Default: 1. + se_cfg (dict): Config dict for se layer. Default: None, which means no + se layer. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.with_res_shortcut = (stride == 1 and in_channels == out_channels) + assert stride in [1, 2] + self.with_cp = with_cp + self.with_se = se_cfg is not None + self.with_expand_conv = (mid_channels != in_channels) + + if self.with_se: + assert isinstance(se_cfg, dict) + + if self.with_expand_conv: + self.expand_conv = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.depthwise_conv = ConvModule( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=mid_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if self.with_se: + self.se = SELayer(**se_cfg) + self.linear_conv = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + + if self.with_expand_conv: + out = self.expand_conv(out) + + out = self.depthwise_conv(out) + + if self.with_se: + out = self.se(out) + + out = self.linear_conv(out) + + if self.with_res_shortcut: + return x + out + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/make_divisible.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/make_divisible.py new file mode 100644 index 0000000000..1ec74689e3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/make_divisible.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def make_divisible(value, divisor, min_value=None, min_ratio=0.9): + """Make divisible function. + + This function rounds the channel number down to the nearest value that can + be divisible by the divisor. + + Args: + value (int): The original channel number. 
+ divisor (int): The divisor to fully divide the channel number. + min_value (int, optional): The minimum value of the output channel. + Default: None, means that the minimum value equal to the divisor. + min_ratio (float): The minimum ratio of the rounded channel + number to the original channel number. Default: 0.9. + Returns: + int: The modified output channel number + """ + + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than (1-min_ratio). + if new_value < min_ratio * value: + new_value += divisor + return new_value diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/se_layer.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/se_layer.py new file mode 100644 index 0000000000..bcef12fc47 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/models/utils/se_layer.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +from .make_divisible import make_divisible + + +class SELayer(BaseModule): + """Squeeze-and-Excitation Module. + + Args: + channels (int): The input (and output) channels of the SE layer. + squeeze_channels (None or int): The intermediate channel number of + SElayer. Default: None, means the value of ``squeeze_channels`` + is ``make_divisible(channels // ratio, divisor)``. + ratio (int): Squeeze ratio in SELayer, the intermediate channel will + be ``make_divisible(channels // ratio, divisor)``. Only used when + ``squeeze_channels`` is None. Default: 16. + divisor(int): The divisor to true divide the channel number. Only + used when ``squeeze_channels`` is None. Default: 8. + conv_cfg (None or dict): Config dict for convolution layer. Default: + None, which means using conv2d. + act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configurated + by this dict. If act_cfg is a sequence of dicts, the first + activation layer will be configurated by the first dict and the + second activation layer will be configurated by the second dict. + Default: (dict(type='ReLU'), dict(type='Sigmoid')) + """ + + def __init__(self, + channels, + squeeze_channels=None, + ratio=16, + divisor=8, + bias='auto', + conv_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), + init_cfg=None): + super(SELayer, self).__init__(init_cfg) + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmcv.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + if squeeze_channels is None: + squeeze_channels = make_divisible(channels // ratio, divisor) + assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \ + '"squeeze_channels" should be a positive integer, but get ' + \ + f'{squeeze_channels} instead.' 
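`make_divisible` above keeps channel counts hardware-friendly without shrinking them by more than 10%; a few standalone checks (the function body is reproduced from this file, the inputs are arbitrary):

    def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
        if min_value is None:
            min_value = divisor
        new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
        if new_value < min_ratio * value:
            new_value += divisor
        return new_value

    print(make_divisible(37, 8))             # 40: rounded to the nearest multiple of 8
    print(make_divisible(10, 8))             # 16: rounding down to 8 would lose more than 10%, so bump up
    print(make_divisible(2048 // 16, 8))     # 128: e.g. SELayer squeeze channels for 2048 channels, ratio=16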
+ self.conv1 = ConvModule( + in_channels=channels, + out_channels=squeeze_channels, + kernel_size=1, + stride=1, + bias=bias, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=squeeze_channels, + out_channels=channels, + kernel_size=1, + stride=1, + bias=bias, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/__init__.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/__init__.py new file mode 100644 index 0000000000..0e80ec1355 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .collect_env import collect_env +from .logger import get_root_logger + +__all__ = ['collect_env', 'get_root_logger'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/collect_env.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/collect_env.py new file mode 100644 index 0000000000..adb5030f27 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/collect_env.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import collect_env as collect_base_env +from mmcv.utils import get_git_hash + +import mmcls + + +def collect_env(): + """Collect the information of the running environments.""" + env_info = collect_base_env() + env_info['MMClassification'] = mmcls.__version__ + '+' + get_git_hash()[:7] + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print(f'{name}: {val}') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/logger.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/logger.py new file mode 100644 index 0000000000..478bdc171f --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/utils/logger.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +from mmcv.utils import get_logger + + +def get_root_logger(log_file=None, log_level=logging.INFO): + return get_logger('mmcls', log_file, log_level) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/version.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/version.py new file mode 100644 index 0000000000..32ac224fd9 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/mmcls/version.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved + +__version__ = '0.17.0' + + +def parse_version_info(version_str): + """Parse a version string into a tuple. + + Args: + version_str (str): The version string. + Returns: + tuple[int | str]: The version info, e.g., "1.3.0" is parsed into + (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). 
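The small utilities in `mmcls/utils` above are typically used together at the start of training; a minimal usage sketch, assuming this patched package and mmcv are installed (illustrative only):

    import logging
    from mmcls.utils import collect_env, get_root_logger

    logger = get_root_logger(log_level=logging.INFO)   # or pass log_file=... to also write to disk
    for name, val in collect_env().items():
        logger.info(f'{name}: {val}')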
+ """ + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) + +__all__ = ['__version__', 'version_info', 'parse_version_info'] diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/model-index.yml b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/model-index.yml new file mode 100644 index 0000000000..f48e0937a4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/model-index.yml @@ -0,0 +1,15 @@ +Import: + - configs/fp16/metafile.yml + - configs/mobilenet_v2/metafile.yml + - configs/resnet/metafile.yml + - configs/res2net/metafile.yml + - configs/resnext/metafile.yml + - configs/seresnet/metafile.yml + - configs/shufflenet_v1/metafile.yml + - configs/shufflenet_v2/metafile.yml + - configs/swin_transformer/metafile.yml + - configs/vgg/metafile.yml + - configs/repvgg/metafile.yml + - configs/tnt/metafile.yml + - configs/vision_transformer/metafile.yml + - configs/t2t_vit/metafile.yml diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements.txt b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements.txt new file mode 100644 index 0000000000..6da5adea75 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements.txt @@ -0,0 +1,3 @@ +-r requirements/optional.txt +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/docs.txt b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/docs.txt new file mode 100644 index 0000000000..e009fcdaa6 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/docs.txt @@ -0,0 +1,7 @@ +docutils==0.16.0 +m2r +myst-parser +-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==4.0.2 +sphinx-copybutton +sphinx_markdown_tables diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/mminstall.txt b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/mminstall.txt new file mode 100644 index 0000000000..aceefe4d1a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/mminstall.txt @@ -0,0 +1 @@ +mmcv-full>=1.3.8,<=1.7.2 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/optional.txt b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/optional.txt new file mode 100644 index 0000000000..ca3c4e779e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/optional.txt @@ -0,0 +1,2 @@ +albumentations>=0.3.2 --no-binary imgaug,albumentations +requests diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/readthedocs.txt b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/readthedocs.txt new file mode 100644 index 0000000000..cd16364f48 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/readthedocs.txt @@ -0,0 +1,3 @@ +mmcv>=1.3.8 +torch +torchvision diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/runtime.txt b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/runtime.txt new file mode 100644 index 
0000000000..80565dbe37 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/runtime.txt @@ -0,0 +1,3 @@ +matplotlib +numpy +packaging diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/tests.txt b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/tests.txt new file mode 100644 index 0000000000..29d351b558 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/requirements/tests.txt @@ -0,0 +1,8 @@ +codecov +flake8 +interrogate +isort==4.3.21 +mmdet +pytest +xdoctest >= 0.10.0 +yapf diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.cfg b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.cfg new file mode 100644 index 0000000000..44f3117acc --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.cfg @@ -0,0 +1,24 @@ +[bdist_wheel] +universal=1 + +[aliases] +test=pytest + +[yapf] +based_on_style = pep8 +blank_line_before_nested_class_or_def = true +split_before_expression_after_opening_paren = true + +[isort] +line_length = 79 +multi_line_output = 0 +known_standard_library = pkg_resources,setuptools +known_first_party = mmcls +known_third_party = PIL,m2r,matplotlib,mmcv,mmdet,numpy,onnxruntime,packaging,pytest,pytorch_sphinx_theme,recommonmark,requests,rich,seaborn,sphinx,torch,torchvision,ts +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY + +[codespell] +skip = *.ipynb +quiet-level = 3 +ignore-words-list = patten,confectionary,nd,ty,formating diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.py new file mode 100644 index 0000000000..8046f1d059 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/setup.py @@ -0,0 +1,174 @@ +import os +import os.path as osp +import shutil +import sys +import warnings +from setuptools import find_packages, setup + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +def get_version(): + version_file = 'mmcls/version.py' + with open(version_file, 'r', encoding='utf-8') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. 
+ + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import re + import sys + from os.path import exists + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + info['version'] = (op, version) + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +def add_mim_extension(): + """Add extra files that are required to support MIM into the package. + + These files will be added by creating a symlink to the originals if the + package is installed in `editable` mode (e.g. pip install -e .), or by + copying from the originals otherwise. 
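`parse_requirements` above strips version specifiers with a small regex split; the sketch below applies the same pattern to a few lines taken from the requirements files in this patch (standalone, illustrative):

    import re

    pat = '(' + '|'.join(['>=', '==', '>']) + ')'
    for line in ['mmcv-full>=1.3.8,<=1.7.2', 'numpy', 'xdoctest >= 0.10.0']:
        print([p.strip() for p in re.split(pat, line, maxsplit=1)])
    # ['mmcv-full', '>=', '1.3.8,<=1.7.2']
    # ['numpy']
    # ['xdoctest', '>=', '0.10.0']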
+ """ + + # parse installment mode + if 'develop' in sys.argv: + # installed by `pip install -e .` + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + mode = 'copy' + else: + return + + filenames = ['tools', 'configs', 'model-index.yml'] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmcls', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + os.symlink(src_relpath, tar_path) + elif mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + +if __name__ == '__main__': + add_mim_extension() + setup( + name='mmcls', + version=get_version(), + description='OpenMMLab Image Classification Toolbox and Benchmark', + long_description=readme(), + long_description_content_type='text/markdown', + author='MMClassification Contributors', + author_email='openmmlab@gmail.com', + keywords='computer vision, image classification', + url='https://github.com/open-mmlab/mmclassification', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + include_package_data=True, + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + ], + license='Apache License 2.0', + tests_require=parse_requirements('requirements/tests.txt'), + install_requires=parse_requirements('requirements/runtime.txt'), + zip_safe=False) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/env_npu.sh b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/env_npu.sh new file mode 100644 index 0000000000..aab79fff2e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/env_npu.sh @@ -0,0 +1,55 @@ +#!/bin/bash +CANN_INSTALL_PATH_CONF='/etc/Ascend/ascend_cann_install.info' + +if [ -f $CANN_INSTALL_PATH_CONF ]; then + CANN_INSTALL_PATH=$(cat $CANN_INSTALL_PATH_CONF | grep Install_Path | cut -d "=" -f 2) +else + CANN_INSTALL_PATH="/usr/local/Ascend" +fi + +if [ -d ${CANN_INSTALL_PATH}/ascend-toolkit/latest ]; then + source ${CANN_INSTALL_PATH}/ascend-toolkit/set_env.sh +else + source ${CANN_INSTALL_PATH}/nnae/set_env.sh +fi +msnpureport -g error -d 0 +msnpureport -g error -d 1 +msnpureport -g error -d 2 +msnpureport -g error -d 3 +msnpureport -g error -d 4 +msnpureport -g error -d 5 +msnpureport -g error -d 6 +msnpureport -g error -d 7 + +#将Host日志输出到串口,0-关闭/1-开启 +export ASCEND_SLOG_PRINT_TO_STDOUT=0 +#设置默认日志级别,0-debug/1-info/2-warning/3-error +export ASCEND_GLOBAL_LOG_LEVEL=3 +#设置Event日志开启标志,0-关闭/1-开启 +export ASCEND_GLOBAL_EVENT_ENABLE=0 +#设置是否开启taskque,0-关闭/1-开启 +export TASK_QUEUE_ENABLE=1 +#HCCL白名单开关,1-关闭/0-开启 +export HCCL_WHITELIST_DISABLE=1 + + 
+path_lib=$(python3.7 -c """
+import sys
+import re
+result=''
+for index in range(len(sys.path)):
+    match_sit = re.search('-packages', sys.path[index])
+    if match_sit is not None:
+        match_lib = re.search('lib', sys.path[index])
+
+        if match_lib is not None:
+            end=match_lib.span()[1]
+            result += sys.path[index][0:end] + ':'
+
+        result+=sys.path[index] + '/torch/lib:'
+print(result)"""
+)
+
+echo ${path_lib}
+
+export LD_LIBRARY_PATH=/usr/local/python3.7.5/lib/:${path_lib}:$LD_LIBRARY_PATH
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/set_conda.sh b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/set_conda.sh
new file mode 100644
index 0000000000..febb0fa349
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/set_conda.sh
@@ -0,0 +1,2 @@
+export PATH=/home/anaconda3/bin:$PATH
+export LD_LIBRARY_PATH=/home/anaconda3/lib:$LD_LIBRARY_PATH
\ No newline at end of file
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3915_performance_8p.sh b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3915_performance_8p.sh
new file mode 100644
index 0000000000..ae2cf96d3a
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3915_performance_8p.sh
@@ -0,0 +1,141 @@
+#!/bin/bash
+
+
+#Collective communication parameters; do not modify
+export RANK_SIZE=8
+
+
+# Dataset path; keep empty here, do not modify
+data_path=""
+
+#Network name, same as the directory name; review and modify per model
+Network="Resnet50_ID3915_for_PyTorch"
+
+#Training batch_size; review and modify per model
+batch_size=32
+PORT=${PORT:-29500}
+CONFIG=configs/resnet/resnet50_b32x8_imagenet.py
+
+#Argument validation; do not modify
+# data_path is a required argument; other arguments may be added or removed as the model requires, and any new argument must be defined and assigned above
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --conda_name* ]];then
+        conda_name=`echo ${para#*=}`
+        source set_conda.sh
+        echo "conda_name: $conda_name"
+        source activate $conda_name
+    fi
+done
+
+#Check that data_path was passed in; do not modify
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+###############Specify the execution path of the training script###############
+# cd to the directory at the same level as the test folder before running, for better compatibility; test_path_dir is the path that contains the test folder
+cur_path=`pwd`
+cur_path_last_dirname=${cur_path##*/}
+if [ x"${cur_path_last_dirname}" == x"test" ];then
+    test_path_dir=${cur_path}
+    cd ..
+    cur_path=`pwd`
+else
+    test_path_dir=${cur_path}/test
+fi
+
+#Create the DeviceID output directory; do not modify
+ASCEND_DEVICE_ID=0
+if [ -d ${test_path_dir}/output/${ASCEND_DEVICE_ID} ];then
+    rm -rf ${test_path_dir}/output/$ASCEND_DEVICE_ID
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+fi
+
+#################Launch the training script#################
+#Training start time; do not modify
+start_time=$(date +%s)
+# Source the environment variables when not running on the platform
+check_etp_flag=`env | grep etp_running_flag`
+etp_flag=`echo ${check_etp_flag#*=}`
+if [ x"${etp_flag}" != x"true" ];then
+    source ${test_path_dir}/env_npu.sh
+fi
+#Dataset preparation
+ln -nsf ${data_path} $cur_path/data/imagenet
+
+#For the performance test, change max_epochs to 1
+path=configs/_base_/schedules/imagenet_bs256.py
+key_word="max_epochs=100"
+line=`grep -rn "$key_word" $path | tail -1 | awk -F ':' '{print $1}'`
+if [ $line ]; then
+    sed -i "$[line]s/100/1/g" $path
+fi
+
+#Run the training script; the arguments below do not need modification, others should be reviewed per model
+PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
+python3 -m torch.distributed.launch \
+    --nproc_per_node=8 \
+    --master_port=$PORT \
+    tools/train.py $CONFIG \
+    --device npu \
+    --launcher pytorch > ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+
+wait
+
+#After the performance test, restore max_epochs to 100
+path=configs/_base_/schedules/imagenet_bs256.py
+key_word="max_epochs=1"
+line=`grep -rn "$key_word" $path | tail -1 | awk -F ':' '{print $1}'`
+if [ $line ]; then
+    sed -i "$[line]s/1/100/g" $path
+fi
+
+##################Collect training data################
+#Training end time; do not modify
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+#Print results; do not modify
+echo "------------------ Final result ------------------"
+#Compute performance (FPS); review and modify per model
+FPS=`grep -a 'time' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F "," '{print$4}' | awk '{print$NF}' | awk 'BEGIN{count=0}{if(NR>2){sum+=$NF;count+=1}}END{printf "%.4f\n", sum/count}'`
+FPS=`awk 'BEGIN{printf "%.4f", '${batch_size}'/'${FPS}'}'`
+#Print; do not modify
+echo "Final Performance images/sec : $FPS"
+
+#Print; do not modify
+echo "E2E Training Duration sec : $e2e_time"
+
+#Summary for performance monitoring
+#Training case information; do not modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+#Collect performance data; do not modify
+#Throughput
+# ActualFPS=`awk -v x="$FPS" -v y="$RANK_SIZE" 'BEGIN{printf "%.3f\n", x*y}'`
+#Training time per iteration
+TrainingTime=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'*1000/'${FPS}'}'`
+
+#Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review and modify per model
+grep Epoch ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep time | awk '{print$NF}' >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#Loss of the last iteration; do not modify
+ActualLoss=`awk 'END {print}' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+#Print key information into ${CaseName}.log; do not modify
+echo "Network = ${Network}" > ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${FPS}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = 
${e2e_time}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3918_performance_8p.sh b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3918_performance_8p.sh
new file mode 100644
index 0000000000..be9716979d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/test/train_ID3918_performance_8p.sh
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+
+#Collective communication parameters; do not modify
+export RANK_SIZE=8
+
+
+# Dataset path; keep empty here, do not modify
+data_path=""
+
+#Network name, same as the directory name; review and modify per model
+Network="resnext32x4d-50_ID3918_for_PyTorch"
+
+#Training batch_size; review and modify per model
+batch_size=32
+PORT=${PORT:-29500}
+CONFIG=configs/resnext/resnext50_32x4d_b32x8_imagenet.py
+
+
+#Argument validation; do not modify
+# data_path is a required argument; other arguments may be added or removed as the model requires, and any new argument must be defined and assigned above
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --conda_name* ]];then
+        conda_name=`echo ${para#*=}`
+        source set_conda.sh
+        echo "conda_name: $conda_name"
+        source activate $conda_name
+    fi
+done
+
+#Check that data_path was passed in; do not modify
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+###############Specify the execution path of the training script###############
+# cd to the directory at the same level as the test folder before running, for better compatibility; test_path_dir is the path that contains the test folder
+cur_path=`pwd`
+cur_path_last_dirname=${cur_path##*/}
+if [ x"${cur_path_last_dirname}" == x"test" ];then
+    test_path_dir=${cur_path}
+    cd ..
+    cur_path=`pwd`
+else
+    test_path_dir=${cur_path}/test
+fi
+
+#Create the DeviceID output directory; do not modify
+ASCEND_DEVICE_ID=0
+if [ -d ${test_path_dir}/output/${ASCEND_DEVICE_ID} ];then
+    rm -rf ${test_path_dir}/output/$ASCEND_DEVICE_ID
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+fi
+
+#################Launch the training script#################
+#Training start time; do not modify
+start_time=$(date +%s)
+# Source the environment variables when not running on the platform
+check_etp_flag=`env | grep etp_running_flag`
+etp_flag=`echo ${check_etp_flag#*=}`
+if [ x"${etp_flag}" != x"true" ];then
+    source ${test_path_dir}/env_npu.sh
+fi
+#Dataset preparation
+ln -nsf ${data_path} $cur_path/data/imagenet
+
+#For the performance test, change max_epochs to 1
+path=configs/_base_/schedules/imagenet_bs256.py
+key_word="max_epochs=100"
+line=`grep -rn "$key_word" $path | tail -1 | awk -F ':' '{print $1}'`
+if [ $line ]; then
+    sed -i "$[line]s/100/1/g" $path
+fi
+
+#Run the training script; the arguments below do not need modification, others should be reviewed per model
+PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
+python3 -m torch.distributed.launch \
+    --nproc_per_node=8 \
+    --master_port=$PORT \
+    tools/train.py $CONFIG \
+    --device npu \
+    --launcher pytorch > ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+
+wait
+
+#After the performance test, restore max_epochs to 100
+path=configs/_base_/schedules/imagenet_bs256.py
+key_word="max_epochs=1"
+line=`grep -rn "$key_word" $path | tail -1 | awk -F ':' '{print $1}'`
+if [ $line ]; then
+    sed -i "$[line]s/1/100/g" $path
+fi
+
+##################Collect training data################
+#Training end time; do not modify
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+#Print results; do not modify
+echo "------------------ Final result ------------------"
+#Compute performance (FPS); review and modify per model
+FPS=`grep -a 'time' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F "," '{print$4}' | awk '{print$NF}' | awk 'BEGIN{count=0}{if(NR>2){sum+=$NF;count+=1}}END{printf "%.4f\n", sum/count}'`
+FPS=`awk 'BEGIN{printf "%.4f", '${batch_size}'/'${FPS}'}'`
+#Print; do not modify
+echo "Final Performance images/sec : $FPS"
+
+#Print; do not modify
+echo "E2E Training Duration sec : $e2e_time"
+
+#Summary for performance monitoring
+#Training case information; do not modify
+BatchSize=${batch_size}
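+#Editor's note (sketch): FPS above equals batch_size divided by the average
+#per-iteration time parsed from the training log, i.e. single-device throughput;
+#an aggregate 8-device figure would scale it by RANK_SIZE, which is what the
+#commented-out ActualFPS line below computes:
+#  ActualFPS=`awk -v x="$FPS" -v y="$RANK_SIZE" 'BEGIN{printf "%.3f\n", x*y}'`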
+DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +#获取性能数据,不需要修改 +#吞吐量 +# ActualFPS=`awk -v x="$FPS" -v y="$RANK_SIZE" 'BEGIN{printf "%.3f\n", x*y}'` +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要模型审视修改 +grep Epoch ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep time | awk '{print$NF}' >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print}' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${FPS}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/dataset/ann.txt b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/dataset/ann.txt new file mode 100644 index 0000000000..a21a9c4272 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/dataset/ann.txt @@ -0,0 +1,3 @@ +a/1.JPG 0 +b/2.jpeg 1 +b/subb/2.jpeg 1 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/dataset/b/2.jpeg b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/dataset/b/2.jpeg new file mode 100644 index 0000000000..e69de29bb2 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/retinanet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/retinanet.py new file mode 100644 index 0000000000..2c38ae5279 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/data/retinanet.py @@ -0,0 +1,82 @@ +# small RetinaNet +num_classes=3 + +# model settings +model = dict( + type='RetinaNet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + bbox_head=dict( + type='RetinaHead', + num_classes=num_classes, + in_channels=256, + stacked_convs=1, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + 
loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict(test=dict(pipeline=test_pipeline)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_common.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_common.py new file mode 100644 index 0000000000..c86f779fc1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_common.py @@ -0,0 +1,295 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import tempfile +from unittest.mock import MagicMock, patch + +import numpy as np +import pytest +import torch + +from mmcls.datasets import (DATASETS, BaseDataset, ImageNet21k, + MultiLabelDataset) + + +@pytest.mark.parametrize('dataset_name', [ + 'MNIST', 'FashionMNIST', 'CIFAR10', 'CIFAR100', 'ImageNet', 'VOC', + 'ImageNet21k' +]) +def test_datasets_override_default(dataset_name): + dataset_class = DATASETS.get(dataset_name) + load_annotations_f = dataset_class.load_annotations + dataset_class.load_annotations = MagicMock() + + original_classes = dataset_class.CLASSES + + # Test VOC year + if dataset_name == 'VOC': + dataset = dataset_class( + data_prefix='VOC2007', + pipeline=[], + classes=('bus', 'car'), + test_mode=True) + assert dataset.year == 2007 + with pytest.raises(ValueError): + dataset = dataset_class( + data_prefix='VOC', + pipeline=[], + classes=('bus', 'car'), + test_mode=True) + + # Test setting classes as a tuple + dataset = dataset_class( + data_prefix='VOC2007' if dataset_name == 'VOC' else '', + pipeline=[], + classes=('bus', 'car'), + test_mode=True) + assert dataset.CLASSES == ('bus', 'car') + + # Test setting classes as a list + dataset = dataset_class( + data_prefix='VOC2007' if dataset_name == 'VOC' else '', + pipeline=[], + classes=['bus', 'car'], + test_mode=True) + assert dataset.CLASSES == ['bus', 'car'] + + # Test setting classes through a file + tmp_file = tempfile.NamedTemporaryFile() + with open(tmp_file.name, 'w') as f: + f.write('bus\ncar\n') + dataset = dataset_class( + data_prefix='VOC2007' if dataset_name == 'VOC' else '', + pipeline=[], + classes=tmp_file.name, + test_mode=True) + tmp_file.close() + + assert dataset.CLASSES == ['bus', 'car'] + + # Test overriding not a subset + dataset = dataset_class( + data_prefix='VOC2007' if dataset_name == 'VOC' else '', + pipeline=[], + classes=['foo'], + test_mode=True) + assert dataset.CLASSES == ['foo'] + + # Test default behavior + dataset = dataset_class( + data_prefix='VOC2007' if dataset_name == 'VOC' else '', pipeline=[]) + + if dataset_name == 'VOC': + assert dataset.data_prefix == 'VOC2007' + else: + assert dataset.data_prefix 
== '' + assert not dataset.test_mode + assert dataset.ann_file is None + assert dataset.CLASSES == original_classes + + dataset_class.load_annotations = load_annotations_f + + +@patch.multiple(MultiLabelDataset, __abstractmethods__=set()) +@patch.multiple(BaseDataset, __abstractmethods__=set()) +def test_dataset_evaluation(): + # test multi-class single-label evaluation + dataset = BaseDataset(data_prefix='', pipeline=[], test_mode=True) + dataset.data_infos = [ + dict(gt_label=0), + dict(gt_label=0), + dict(gt_label=1), + dict(gt_label=2), + dict(gt_label=1), + dict(gt_label=0) + ] + fake_results = np.array([[0.7, 0, 0.3], [0.5, 0.2, 0.3], [0.4, 0.5, 0.1], + [0, 0, 1], [0, 0, 1], [0, 0, 1]]) + eval_results = dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 'support', 'accuracy'], + metric_options={'topk': 1}) + assert eval_results['precision'] == pytest.approx( + (1 + 1 + 1 / 3) / 3 * 100.0) + assert eval_results['recall'] == pytest.approx( + (2 / 3 + 1 / 2 + 1) / 3 * 100.0) + assert eval_results['f1_score'] == pytest.approx( + (4 / 5 + 2 / 3 + 1 / 2) / 3 * 100.0) + assert eval_results['support'] == 6 + assert eval_results['accuracy'] == pytest.approx(4 / 6 * 100) + + # test input as tensor + fake_results_tensor = torch.from_numpy(fake_results) + eval_results_ = dataset.evaluate( + fake_results_tensor, + metric=['precision', 'recall', 'f1_score', 'support', 'accuracy'], + metric_options={'topk': 1}) + assert eval_results_ == eval_results + + # test thr + eval_results = dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 'accuracy'], + metric_options={ + 'thrs': 0.6, + 'topk': 1 + }) + assert eval_results['precision'] == pytest.approx( + (1 + 0 + 1 / 3) / 3 * 100.0) + assert eval_results['recall'] == pytest.approx((1 / 3 + 0 + 1) / 3 * 100.0) + assert eval_results['f1_score'] == pytest.approx( + (1 / 2 + 0 + 1 / 2) / 3 * 100.0) + assert eval_results['accuracy'] == pytest.approx(2 / 6 * 100) + # thrs must be a number or tuple + with pytest.raises(TypeError): + eval_results = dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 'accuracy'], + metric_options={ + 'thrs': 'thr', + 'topk': 1 + }) + + # test topk and thr as tuple + eval_results = dataset.evaluate( + fake_results, + metric=['precision', 'recall', 'f1_score', 'accuracy'], + metric_options={ + 'thrs': (0.5, 0.6), + 'topk': (1, 2) + }) + assert { + 'precision_thr_0.50', 'precision_thr_0.60', 'recall_thr_0.50', + 'recall_thr_0.60', 'f1_score_thr_0.50', 'f1_score_thr_0.60', + 'accuracy_top-1_thr_0.50', 'accuracy_top-1_thr_0.60', + 'accuracy_top-2_thr_0.50', 'accuracy_top-2_thr_0.60' + } == eval_results.keys() + assert type(eval_results['precision_thr_0.50']) == float + assert type(eval_results['recall_thr_0.50']) == float + assert type(eval_results['f1_score_thr_0.50']) == float + assert type(eval_results['accuracy_top-1_thr_0.50']) == float + + eval_results = dataset.evaluate( + fake_results, + metric='accuracy', + metric_options={ + 'thrs': 0.5, + 'topk': (1, 2) + }) + assert {'accuracy_top-1', 'accuracy_top-2'} == eval_results.keys() + assert type(eval_results['accuracy_top-1']) == float + + eval_results = dataset.evaluate( + fake_results, + metric='accuracy', + metric_options={ + 'thrs': (0.5, 0.6), + 'topk': 1 + }) + assert {'accuracy_thr_0.50', 'accuracy_thr_0.60'} == eval_results.keys() + assert type(eval_results['accuracy_thr_0.50']) == float + + # test evaluation results for classes + eval_results = dataset.evaluate( + fake_results, + 
metric=['precision', 'recall', 'f1_score', 'support'], + metric_options={'average_mode': 'none'}) + assert eval_results['precision'].shape == (3, ) + assert eval_results['recall'].shape == (3, ) + assert eval_results['f1_score'].shape == (3, ) + assert eval_results['support'].shape == (3, ) + + # the average_mode method must be valid + with pytest.raises(ValueError): + eval_results = dataset.evaluate( + fake_results, + metric='precision', + metric_options={'average_mode': 'micro'}) + with pytest.raises(ValueError): + eval_results = dataset.evaluate( + fake_results, + metric='recall', + metric_options={'average_mode': 'micro'}) + with pytest.raises(ValueError): + eval_results = dataset.evaluate( + fake_results, + metric='f1_score', + metric_options={'average_mode': 'micro'}) + with pytest.raises(ValueError): + eval_results = dataset.evaluate( + fake_results, + metric='support', + metric_options={'average_mode': 'micro'}) + + # the metric must be valid for the dataset + with pytest.raises(ValueError): + eval_results = dataset.evaluate(fake_results, metric='map') + + # test multi-label evaluation + dataset = MultiLabelDataset(data_prefix='', pipeline=[], test_mode=True) + dataset.data_infos = [ + dict(gt_label=[1, 1, 0, -1]), + dict(gt_label=[1, 1, 0, -1]), + dict(gt_label=[0, -1, 1, -1]), + dict(gt_label=[0, 1, 0, -1]), + dict(gt_label=[0, 1, 0, -1]), + ] + fake_results = np.array([[0.9, 0.8, 0.3, 0.2], [0.1, 0.2, 0.2, 0.1], + [0.7, 0.5, 0.9, 0.3], [0.8, 0.1, 0.1, 0.2], + [0.8, 0.1, 0.1, 0.2]]) + + # the metric must be valid + with pytest.raises(ValueError): + metric = 'coverage' + dataset.evaluate(fake_results, metric=metric) + # only one metric + metric = 'mAP' + eval_results = dataset.evaluate(fake_results, metric=metric) + assert 'mAP' in eval_results.keys() + assert 'CP' not in eval_results.keys() + + # multiple metrics + metric = ['mAP', 'CR', 'OF1'] + eval_results = dataset.evaluate(fake_results, metric=metric) + assert 'mAP' in eval_results.keys() + assert 'CR' in eval_results.keys() + assert 'OF1' in eval_results.keys() + assert 'CF1' not in eval_results.keys() + + +def test_dataset_imagenet21k(): + base_dataset_cfg = dict( + data_prefix='tests/data/dataset', pipeline=[], recursion_subdir=True) + + with pytest.raises(NotImplementedError): + # multi_label have not be implemented + dataset_cfg = base_dataset_cfg.copy() + dataset_cfg.update({'multi_label': True}) + dataset = ImageNet21k(**dataset_cfg) + + with pytest.raises(TypeError): + # ann_file must be a string or None + dataset_cfg = base_dataset_cfg.copy() + ann_file = {'path': 'tests/data/dataset/ann.txt'} + dataset_cfg.update({'ann_file': ann_file}) + dataset = ImageNet21k(**dataset_cfg) + + # test with recursion_subdir is True + dataset = ImageNet21k(**base_dataset_cfg) + assert len(dataset) == 3 + assert isinstance(dataset[0], dict) + assert 'img_prefix' in dataset[0] + assert 'img_info' in dataset[0] + assert 'gt_label' in dataset[0] + + # test with recursion_subdir is False + dataset_cfg = base_dataset_cfg.copy() + dataset_cfg['recursion_subdir'] = False + dataset = ImageNet21k(**dataset_cfg) + assert len(dataset) == 2 + assert isinstance(dataset[0], dict) + + # test with load annotation from ann file + dataset_cfg = base_dataset_cfg.copy() + dataset_cfg['ann_file'] = 'tests/data/dataset/ann.txt' + dataset = ImageNet21k(**dataset_cfg) + assert len(dataset) == 3 + assert isinstance(dataset[0], dict) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_utils.py 
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_utils.py new file mode 100644 index 0000000000..de471f7fa3 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_utils.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +import string +import tempfile + +from mmcls.datasets.utils import check_integrity, rm_suffix + + +def test_dataset_utils(): + # test rm_suffix + assert rm_suffix('a.jpg') == 'a' + assert rm_suffix('a.bak.jpg') == 'a.bak' + assert rm_suffix('a.bak.jpg', suffix='.jpg') == 'a.bak' + assert rm_suffix('a.bak.jpg', suffix='.bak.jpg') == 'a' + + # test check_integrity + rand_file = ''.join(random.sample(string.ascii_letters, 10)) + assert not check_integrity(rand_file, md5=None) + assert not check_integrity(rand_file, md5=2333) + tmp_file = tempfile.NamedTemporaryFile() + assert check_integrity(tmp_file.name, md5=None) + assert not check_integrity(tmp_file.name, md5=2333) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_wrapper.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_wrapper.py new file mode 100644 index 0000000000..2391baf150 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_datasets/test_dataset_wrapper.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import bisect +import math +from collections import defaultdict +from unittest.mock import MagicMock, patch + +import numpy as np + +from mmcls.datasets import (BaseDataset, ClassBalancedDataset, ConcatDataset, + RepeatDataset) + + +@patch.multiple(BaseDataset, __abstractmethods__=set()) +def construct_toy_dataset(length): + BaseDataset.CLASSES = ('foo', 'bar') + BaseDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx) + dataset = BaseDataset(data_prefix='', pipeline=[], test_mode=True) + cat_ids_list = [ + np.random.randint(0, 80, num).tolist() + for num in np.random.randint(1, 20, length) + ] + dataset.data_infos = MagicMock() + dataset.data_infos.__len__.return_value = length + dataset.get_cat_ids = MagicMock(side_effect=lambda idx: cat_ids_list[idx]) + return dataset, cat_ids_list + + +def test_concat_dataset(): + dataset_a, cat_ids_list_a = construct_toy_dataset(10) + dataset_b, cat_ids_list_b = construct_toy_dataset(20) + + concat_dataset = ConcatDataset([dataset_a, dataset_b]) + assert concat_dataset[5] == 5 + assert concat_dataset[25] == 15 + assert concat_dataset.get_cat_ids(5) == cat_ids_list_a[5] + assert concat_dataset.get_cat_ids(25) == cat_ids_list_b[15] + assert len(concat_dataset) == len(dataset_a) + len(dataset_b) + assert concat_dataset.CLASSES == BaseDataset.CLASSES + + +def test_repeat_dataset(): + dataset, cat_ids_list = construct_toy_dataset(10) + repeat_dataset = RepeatDataset(dataset, 10) + assert repeat_dataset[5] == 5 + assert repeat_dataset[15] == 5 + assert repeat_dataset[27] == 7 + assert repeat_dataset.get_cat_ids(5) == cat_ids_list[5] + assert repeat_dataset.get_cat_ids(15) == cat_ids_list[5] + assert repeat_dataset.get_cat_ids(27) == cat_ids_list[7] + assert len(repeat_dataset) == 10 * len(dataset) + assert repeat_dataset.CLASSES == BaseDataset.CLASSES + + +def test_class_balanced_dataset(): + dataset, cat_ids_list = construct_toy_dataset(10) + + category_freq = defaultdict(int) + for cat_ids in cat_ids_list: + cat_ids = set(cat_ids) + for cat_id 
in cat_ids: + category_freq[cat_id] += 1 + for k, v in category_freq.items(): + category_freq[k] = v / len(cat_ids_list) + + mean_freq = np.mean(list(category_freq.values())) + repeat_thr = mean_freq + + category_repeat = { + cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq)) + for cat_id, cat_freq in category_freq.items() + } + + repeat_factors = [] + for cat_ids in cat_ids_list: + cat_ids = set(cat_ids) + repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids}) + repeat_factors.append(math.ceil(repeat_factor)) + repeat_factors_cumsum = np.cumsum(repeat_factors) + repeat_factor_dataset = ClassBalancedDataset(dataset, repeat_thr) + assert repeat_factor_dataset.CLASSES == BaseDataset.CLASSES + assert len(repeat_factor_dataset) == repeat_factors_cumsum[-1] + for idx in np.random.randint(0, len(repeat_factor_dataset), 3): + assert repeat_factor_dataset[idx] == bisect.bisect_right( + repeat_factors_cumsum, idx) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_auto_augment.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_auto_augment.py new file mode 100644 index 0000000000..f5c896ef33 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_auto_augment.py @@ -0,0 +1,1241 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import random + +import mmcv +import numpy as np +import pytest +from mmcv.utils import build_from_cfg + +from mmcls.datasets.builder import PIPELINES + + +def construct_toy_data(): + img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], + dtype=np.uint8) + img = np.stack([img, img, img], axis=-1) + results = dict() + # image + results['ori_img'] = img + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + return results + + +def construct_toy_data_photometric(): + img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]], + dtype=np.uint8) + img = np.stack([img, img, img], axis=-1) + results = dict() + # image + results['ori_img'] = img + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + return results + + +def test_auto_augment(): + policies = [[ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ]] + + # test assertion for policies + with pytest.raises(AssertionError): + # policies shouldn't be empty + transform = dict(type='AutoAugment', policies=[]) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + # policy should have type + invalid_policies = copy.deepcopy(policies) + invalid_policies[0][0].pop('type') + transform = dict(type='AutoAugment', policies=invalid_policies) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + # sub policy should be a non-empty list + invalid_policies = copy.deepcopy(policies) + invalid_policies[0] = [] + transform = dict(type='AutoAugment', policies=invalid_policies) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + # policy should be valid in PIPELINES registry. 
+ invalid_policies = copy.deepcopy(policies) + invalid_policies.append([dict(type='Wrong_policy')]) + transform = dict(type='AutoAugment', policies=invalid_policies) + build_from_cfg(transform, PIPELINES) + + # test hparams + transform = dict( + type='AutoAugment', + policies=policies, + hparams=dict(pad_val=15, interpolation='nearest')) + pipeline = build_from_cfg(transform, PIPELINES) + # use hparams if not set in policies config + assert pipeline.policies[0][1]['pad_val'] == 15 + assert pipeline.policies[0][1]['interpolation'] == 'nearest' + + +def test_rand_augment(): + policies = [ + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 1), + pad_val=128, + prob=1., + direction='horizontal', + interpolation='nearest'), + dict(type='Invert', prob=1.), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 90), + prob=0.) + ] + # test assertion for num_policies + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1.5, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=-1, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + # test assertion for magnitude_level + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=None) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=-1) + build_from_cfg(transform, PIPELINES) + # test assertion for magnitude_std + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12, + magnitude_std=None) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12, + magnitude_std='unknown') + build_from_cfg(transform, PIPELINES) + # test assertion for total_level + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12, + total_level=None) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12, + total_level=-30) + build_from_cfg(transform, PIPELINES) + # test assertion for policies + with pytest.raises(AssertionError): + transform = dict( + type='RandAugment', + policies=[], + num_policies=2, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + invalid_policies = copy.deepcopy(policies) + invalid_policies.append(('Wrong_policy')) + transform = dict( + type='RandAugment', + policies=invalid_policies, + num_policies=2, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + invalid_policies = copy.deepcopy(policies) + invalid_policies.append(dict(type='Wrong_policy')) + transform = dict( + type='RandAugment', + policies=invalid_policies, + num_policies=2, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + invalid_policies = copy.deepcopy(policies) + invalid_policies[2].pop('type') + transform = dict( + type='RandAugment', + policies=invalid_policies, + num_policies=2, + 
magnitude_level=12) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + invalid_policies = copy.deepcopy(policies) + invalid_policies[2].pop('magnitude_range') + transform = dict( + type='RandAugment', + policies=invalid_policies, + num_policies=2, + magnitude_level=12) + build_from_cfg(transform, PIPELINES) + + # test case where num_policies = 1 + random.seed(1) + np.random.seed(0) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply translate + img_augmented = np.array( + [[128, 128, 1, 2], [128, 128, 5, 6], [128, 128, 9, 10]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + assert (results['img'] == img_augmented).all() + + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=1, + magnitude_level=12) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply rotation with prob=0. + assert (results['img'] == results['ori_img']).all() + + # test case where magnitude_range is reversed + random.seed(1) + np.random.seed(0) + results = construct_toy_data() + reversed_policies = [ + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(1, 0), + pad_val=128, + prob=1., + direction='horizontal'), + dict(type='Invert', prob=1.), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(30, 0), + prob=0.) + ] + transform = dict( + type='RandAugment', + policies=reversed_policies, + num_policies=1, + magnitude_level=30) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case where num_policies = 2 + random.seed(0) + np.random.seed(0) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply rotate and rotate with prob=0 + assert (results['img'] == results['ori_img']).all() + + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply invert and translate + img_augmented = np.array( + [[252, 251, 128, 128], [248, 247, 128, 128], [244, 243, 128, 128]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + assert (results['img'] == img_augmented).all() + + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=0) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply invert and invert + assert (results['img'] == results['ori_img']).all() + + # test case where magnitude_level = 0 + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=0) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + # apply rotate and translate + assert (results['img'] == results['ori_img']).all() + + # test case where magnitude_std = "inf" + random.seed(3) + np.random.seed(3) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + 
num_policies=2, + magnitude_level=12, + magnitude_std='inf') + pipeline = build_from_cfg(transform, PIPELINES) + # apply invert and translate (magnitude=0.148) + results = pipeline(results) + img_augmented = np.array( + [[127, 254, 253, 252], [127, 250, 249, 248], [127, 246, 245, 244]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + np.testing.assert_array_equal(results['img'], img_augmented) + + # test case where magnitude_std = 0.5 + random.seed(3) + np.random.seed(3) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12, + magnitude_std=0.5) + pipeline = build_from_cfg(transform, PIPELINES) + # apply invert and translate (magnitude=0.384) + results = pipeline(results) + img_augmented = np.array( + [[127, 127, 254, 253], [127, 127, 250, 249], [127, 127, 246, 245]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + np.testing.assert_array_equal(results['img'], img_augmented) + + # test case where magnitude_std is negative + random.seed(3) + np.random.seed(0) + results = construct_toy_data() + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12, + magnitude_std=-1) + pipeline = build_from_cfg(transform, PIPELINES) + # apply translate (magnitude=0.4) and invert + results = pipeline(results) + img_augmented = np.array( + [[127, 127, 254, 253], [127, 127, 250, 249], [127, 127, 246, 245]], + dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + np.testing.assert_array_equal(results['img'], img_augmented) + + # test hparams + random.seed(8) + np.random.seed(0) + results = construct_toy_data() + policies[2]['prob'] = 1.0 + transform = dict( + type='RandAugment', + policies=policies, + num_policies=2, + magnitude_level=12, + magnitude_std=-1, + hparams=dict(pad_val=15, interpolation='nearest')) + pipeline = build_from_cfg(transform, PIPELINES) + # apply translate (magnitude=0.4) and rotate (angle=36) + results = pipeline(results) + img_augmented = np.array( + [[128, 128, 128, 15], [128, 128, 5, 2], [15, 9, 9, 6]], dtype=np.uint8) + img_augmented = np.stack([img_augmented, img_augmented, img_augmented], + axis=-1) + np.testing.assert_array_equal(results['img'], img_augmented) + # hparams won't override setting in policies config + assert pipeline.policies[0]['pad_val'] == 128 + # use hparams if not set in policies config + assert pipeline.policies[2]['pad_val'] == 15 + assert pipeline.policies[2]['interpolation'] == 'nearest' + + +def test_shear(): + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid pad_val + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=0.5, pad_val=(0, 0)) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid direction + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=0.5, direction='diagonal') + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict(type='Shear', magnitude=0.5, 
random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude = 0, therefore no shear + results = construct_toy_data() + transform = dict(type='Shear', magnitude=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob = 0, therefore no shear + results = construct_toy_data() + transform = dict(type='Shear', magnitude=0.5, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test shear horizontally, magnitude=1 + results = construct_toy_data() + transform = dict( + type='Shear', magnitude=1, pad_val=0, prob=1., random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + sheared_img = np.array([[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 9, 10]], + dtype=np.uint8) + sheared_img = np.stack([sheared_img, sheared_img, sheared_img], axis=-1) + assert (results['img'] == sheared_img).all() + assert (results['img'] == results['img2']).all() + + # test shear vertically, magnitude=-1 + results = construct_toy_data() + transform = dict( + type='Shear', + magnitude=-1, + pad_val=0, + prob=1., + direction='vertical', + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + sheared_img = np.array([[1, 6, 11, 0], [5, 10, 0, 0], [9, 0, 0, 0]], + dtype=np.uint8) + sheared_img = np.stack([sheared_img, sheared_img, sheared_img], axis=-1) + assert (results['img'] == sheared_img).all() + + # test shear vertically, magnitude=1, random_negative_prob=1 + results = construct_toy_data() + transform = dict( + type='Shear', + magnitude=1, + pad_val=0, + prob=1., + direction='vertical', + random_negative_prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + sheared_img = np.array([[1, 6, 11, 0], [5, 10, 0, 0], [9, 0, 0, 0]], + dtype=np.uint8) + sheared_img = np.stack([sheared_img, sheared_img, sheared_img], axis=-1) + assert (results['img'] == sheared_img).all() + + # test auto aug with shear + results = construct_toy_data() + policies = [[transform]] + autoaug = dict(type='AutoAugment', policies=policies) + pipeline = build_from_cfg(autoaug, PIPELINES) + results = pipeline(results) + assert (results['img'] == sheared_img).all() + + +def test_translate(): + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Translate', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid pad_val + with pytest.raises(AssertionError): + transform = dict(type='Translate', magnitude=0.5, pad_val=(0, 0)) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Translate', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid direction + with pytest.raises(AssertionError): + transform = dict(type='Translate', magnitude=0.5, direction='diagonal') + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='Translate', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no translate + results = construct_toy_data() + transform = dict(type='Translate', magnitude=0., prob=1.) 
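+    # Editor's note: judging from the expected arrays in this test, `magnitude`
+    # is interpreted as a fraction of the image extent along `direction`, so a
+    # horizontal magnitude of 0.5 shifts the 4-pixel-wide toy image by 2 columns.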
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no translate + results = construct_toy_data() + transform = dict(type='Translate', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test translate horizontally, magnitude=0.5 + results = construct_toy_data() + transform = dict( + type='Translate', + magnitude=0.5, + pad_val=0, + prob=1., + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + translated_img = np.array([[0, 0, 1, 2], [0, 0, 5, 6], [0, 0, 9, 10]], + dtype=np.uint8) + translated_img = np.stack([translated_img, translated_img, translated_img], + axis=-1) + assert (results['img'] == translated_img).all() + assert (results['img'] == results['img2']).all() + + # test translate vertically, magnitude=-0.5 + results = construct_toy_data() + transform = dict( + type='Translate', + magnitude=-0.5, + pad_val=0, + prob=1., + direction='vertical', + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + translated_img = np.array([[9, 10, 11, 12], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + translated_img = np.stack([translated_img, translated_img, translated_img], + axis=-1) + assert (results['img'] == translated_img).all() + + # test translate vertically, magnitude=0.5, random_negative_prob=1 + results = construct_toy_data() + transform = dict( + type='Translate', + magnitude=0.5, + pad_val=0, + prob=1., + direction='vertical', + random_negative_prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + translated_img = np.array([[9, 10, 11, 12], [0, 0, 0, 0], [0, 0, 0, 0]], + dtype=np.uint8) + translated_img = np.stack([translated_img, translated_img, translated_img], + axis=-1) + assert (results['img'] == translated_img).all() + + +def test_rotate(): + # test assertion for invalid type of angle + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid type of center + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., center=0) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid length of center + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., center=(0, )) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid scale + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., scale=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid pad_val + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., pad_val=(0, 0)) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=90., prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict(type='Rotate', angle=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when angle=0, therefore no rotation + results = construct_toy_data() + transform = dict(type='Rotate', angle=0., prob=1.) 
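+    # Editor's note: `angle` is in degrees and, judging from the cases below,
+    # positive values rotate the image clockwise about its centre by default;
+    # 0 degrees (and the full 360-degree turn tested next) is a no-op.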
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when angle=360, therefore no rotation + results = construct_toy_data() + transform = dict(type='Rotate', angle=360., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no rotation + results = construct_toy_data() + transform = dict(type='Rotate', angle=90., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test rotate clockwise, angle=30. + results = construct_toy_data() + transform = dict( + type='Rotate', angle=30., pad_val=0, prob=1., random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + rotated_img = np.array([[5, 2, 2, 0], [9, 6, 7, 4], [0, 11, 11, 8]], + dtype=np.uint8) + rotated_img = np.stack([rotated_img, rotated_img, rotated_img], axis=-1) + assert (results['img'] == rotated_img).all() + assert (results['img'] == results['img2']).all() + + # test rotate clockwise, angle=90, center=(1,1) + results = construct_toy_data() + transform = dict( + type='Rotate', + angle=90., + center=(1, 1), + prob=1., + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + rotated_img = np.array([[9, 5, 1, 128], [10, 6, 2, 128], [11, 7, 3, 128]], + dtype=np.uint8) + rotated_img = np.stack([rotated_img, rotated_img, rotated_img], axis=-1) + assert (results['img'] == rotated_img).all() + assert (results['img'] == results['img2']).all() + + # test rotate counter-clockwise, angle=90. + results = construct_toy_data() + transform = dict( + type='Rotate', angle=-90., pad_val=0, prob=1., random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + rotated_img = np.array([[4, 8, 12, 0], [3, 7, 11, 0], [2, 6, 10, 0]], + dtype=np.uint8) + rotated_img = np.stack([rotated_img, rotated_img, rotated_img], axis=-1) + assert (results['img'] == rotated_img).all() + assert (results['img'] == results['img2']).all() + + # test rotate counter-clockwise, angle=90, random_negative_prob=1 + results = construct_toy_data() + transform = dict( + type='Rotate', angle=-90., pad_val=0, prob=1., random_negative_prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + rotated_img = np.array([[0, 10, 6, 2], [0, 11, 7, 3], [0, 12, 8, 4]], + dtype=np.uint8) + rotated_img = np.stack([rotated_img, rotated_img, rotated_img], axis=-1) + assert (results['img'] == rotated_img).all() + assert (results['img'] == results['img2']).all() + + +def test_auto_contrast(): + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='AutoContrast', prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no auto_contrast + results = construct_toy_data() + transform = dict(type='AutoContrast', prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 + results = construct_toy_data() + transform = dict(type='AutoContrast', prob=1.) 
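+    # Editor's note: the expected values below correspond to stretching the toy
+    # image's intensity range [1, 12] to [0, 255], each pixel becoming
+    # int((v - 1) * 255 / 11), e.g. 2 -> 23 and 11 -> 231.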
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + auto_contrasted_img = np.array( + [[0, 23, 46, 69], [92, 115, 139, 162], [185, 208, 231, 255]], + dtype=np.uint8) + auto_contrasted_img = np.stack( + [auto_contrasted_img, auto_contrasted_img, auto_contrasted_img], + axis=-1) + assert (results['img'] == auto_contrasted_img).all() + assert (results['img'] == results['img2']).all() + + +def test_invert(): + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Invert', prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no invert + results = construct_toy_data() + transform = dict(type='Invert', prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 + results = construct_toy_data() + transform = dict(type='Invert', prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + inverted_img = np.array( + [[254, 253, 252, 251], [250, 249, 248, 247], [246, 245, 244, 243]], + dtype=np.uint8) + inverted_img = np.stack([inverted_img, inverted_img, inverted_img], + axis=-1) + assert (results['img'] == inverted_img).all() + assert (results['img'] == results['img2']).all() + + +def test_equalize(nb_rand_test=100): + + def _imequalize(img): + # equalize the image using PIL.ImageOps.equalize + from PIL import ImageOps, Image + img = Image.fromarray(img) + equalized_img = np.asarray(ImageOps.equalize(img)) + return equalized_img + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Equalize', prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no equalize + results = construct_toy_data() + transform = dict(type='Equalize', prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 with randomly sampled image. + results = construct_toy_data() + transform = dict(type='Equalize', prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + for _ in range(nb_rand_test): + img = np.clip(np.random.normal(0, 1, (256, 256, 3)) * 260, 0, + 255).astype(np.uint8) + results['img'] = img + results = pipeline(copy.deepcopy(results)) + assert (results['img'] == _imequalize(img)).all() + + +def test_solarize(): + # test assertion for invalid type of thr + with pytest.raises(AssertionError): + transform = dict(type='Solarize', thr=(1, 2)) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='Solarize', thr=128, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when thr=256, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='Solarize', thr=256, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when thr=128 + results = construct_toy_data_photometric() + transform = dict(type='Solarize', thr=128, prob=1.) 
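+    # Editor's note: solarization inverts only pixels at or above `thr`
+    # (v -> 255 - v); with thr=128 the values 128, 255, 254, 129 and 253 in the
+    # toy image become 127, 0, 1, 126 and 2, while values below 128 are kept.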
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_solarized = np.array([[0, 127, 0], [1, 127, 1], [2, 126, 2]], + dtype=np.uint8) + img_solarized = np.stack([img_solarized, img_solarized, img_solarized], + axis=-1) + assert (results['img'] == img_solarized).all() + assert (results['img'] == results['img2']).all() + + # test case when thr=100 + results = construct_toy_data_photometric() + transform = dict(type='Solarize', thr=100, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_solarized = np.array([[0, 127, 0], [1, 128, 1], [2, 126, 2]], + dtype=np.uint8) + img_solarized = np.stack([img_solarized, img_solarized, img_solarized], + axis=-1) + assert (results['img'] == img_solarized).all() + assert (results['img'] == results['img2']).all() + + +def test_solarize_add(): + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='SolarizeAdd', magnitude=(1, 2)) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid type of thr + with pytest.raises(AssertionError): + transform = dict(type='SolarizeAdd', magnitude=100, thr=(1, 2)) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='SolarizeAdd', magnitude=100, thr=128, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when thr=0, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='SolarizeAdd', magnitude=100, thr=0, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when thr=128, magnitude=100 + results = construct_toy_data_photometric() + transform = dict(type='SolarizeAdd', magnitude=100, thr=128, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_solarized = np.array( + [[100, 128, 255], [101, 227, 254], [102, 129, 253]], dtype=np.uint8) + img_solarized = np.stack([img_solarized, img_solarized, img_solarized], + axis=-1) + assert (results['img'] == img_solarized).all() + assert (results['img'] == results['img2']).all() + + # test case when thr=100, magnitude=50 + results = construct_toy_data_photometric() + transform = dict(type='SolarizeAdd', magnitude=50, thr=100, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_solarized = np.array([[50, 128, 255], [51, 127, 254], [52, 129, 253]], + dtype=np.uint8) + img_solarized = np.stack([img_solarized, img_solarized, img_solarized], + axis=-1) + assert (results['img'] == img_solarized).all() + assert (results['img'] == results['img2']).all() + + +def test_posterize(): + # test assertion for invalid value of bits + with pytest.raises(AssertionError): + transform = dict(type='Posterize', bits=10) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no posterize + results = construct_toy_data_photometric() + transform = dict(type='Posterize', bits=4, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when bits=8, therefore no solarize + results = construct_toy_data_photometric() + transform = dict(type='Posterize', bits=8, prob=1.) 
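+    # Editor's note: posterizing keeps only the `bits` most significant bits of
+    # each channel, so bits=8 leaves the image unchanged (the comment above
+    # means "no posterize"); with bits=1, 255 -> 128 and 127 -> 0 as checked below.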
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when bits=1 + results = construct_toy_data_photometric() + transform = dict(type='Posterize', bits=1, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_posterized = np.array([[0, 128, 128], [0, 0, 128], [0, 128, 128]], + dtype=np.uint8) + img_posterized = np.stack([img_posterized, img_posterized, img_posterized], + axis=-1) + assert (results['img'] == img_posterized).all() + assert (results['img'] == results['img2']).all() + + # test case when bits=3 + results = construct_toy_data_photometric() + transform = dict(type='Posterize', bits=3, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_posterized = np.array([[0, 128, 224], [0, 96, 224], [0, 128, 224]], + dtype=np.uint8) + img_posterized = np.stack([img_posterized, img_posterized, img_posterized], + axis=-1) + assert (results['img'] == img_posterized).all() + assert (results['img'] == results['img2']).all() + + +def test_contrast(nb_rand_test=100): + + def _adjust_contrast(img, factor): + from PIL.ImageEnhance import Contrast + from PIL import Image + # Image.fromarray defaultly supports RGB, not BGR. + # convert from BGR to RGB + img = Image.fromarray(img[..., ::-1], mode='RGB') + contrasted_img = Contrast(img).enhance(factor) + # convert from RGB to BGR + return np.asarray(contrasted_img)[..., ::-1] + + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Contrast', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Contrast', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='Contrast', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no adjusting contrast + results = construct_toy_data_photometric() + transform = dict(type='Contrast', magnitude=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no adjusting contrast + results = construct_toy_data_photometric() + transform = dict(type='Contrast', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 with randomly sampled image. + results = construct_toy_data() + for _ in range(nb_rand_test): + magnitude = np.random.uniform() * np.random.choice([-1, 1]) + transform = dict( + type='Contrast', + magnitude=magnitude, + prob=1., + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + img = np.clip(np.random.uniform(0, 1, (256, 256, 3)) * 260, 0, + 255).astype(np.uint8) + results['img'] = img + results = pipeline(copy.deepcopy(results)) + # Note the gap (less_equal 1) between PIL.ImageEnhance.Contrast + # and mmcv.adjust_contrast comes from the gap that converts from + # a color image to gray image using mmcv or PIL. 
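+        # Hence the element-wise comparison below uses an absolute
+        # tolerance of 1.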
+ np.testing.assert_allclose( + results['img'], + _adjust_contrast(img, 1 + magnitude), + rtol=0, + atol=1) + + +def test_color_transform(): + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='ColorTransform', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='ColorTransform', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='ColorTransform', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no color transform + results = construct_toy_data_photometric() + transform = dict(type='ColorTransform', magnitude=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no color transform + results = construct_toy_data_photometric() + transform = dict(type='ColorTransform', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when magnitude=-1, therefore got gray img + results = construct_toy_data_photometric() + transform = dict( + type='ColorTransform', magnitude=-1., prob=1., random_negative_prob=0) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_gray = mmcv.bgr2gray(results['ori_img']) + img_gray = np.stack([img_gray, img_gray, img_gray], axis=-1) + assert (results['img'] == img_gray).all() + + # test case when magnitude=0.5 + results = construct_toy_data_photometric() + transform = dict( + type='ColorTransform', magnitude=.5, prob=1., random_negative_prob=0) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_r = np.round( + np.clip((results['ori_img'] * 0.5 + img_gray * 0.5), 0, + 255)).astype(results['ori_img'].dtype) + assert (results['img'] == img_r).all() + assert (results['img'] == results['img2']).all() + + # test case when magnitude=0.3, random_negative_prob=1 + results = construct_toy_data_photometric() + transform = dict( + type='ColorTransform', magnitude=.3, prob=1., random_negative_prob=1.) 
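+    # random_negative_prob=1. is expected to always flip the sign of the
+    # magnitude, so the effective magnitude is -0.3 and the result should
+    # equal the 0.7 / 0.3 blend of the original image and its grayscale
+    # version computed below.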
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_r = np.round( + np.clip((results['ori_img'] * 0.7 + img_gray * 0.3), 0, + 255)).astype(results['ori_img'].dtype) + assert (results['img'] == img_r).all() + assert (results['img'] == results['img2']).all() + + +def test_brightness(nb_rand_test=100): + + def _adjust_brightness(img, factor): + # adjust the brightness of image using + # PIL.ImageEnhance.Brightness + from PIL.ImageEnhance import Brightness + from PIL import Image + img = Image.fromarray(img) + brightened_img = Brightness(img).enhance(factor) + return np.asarray(brightened_img) + + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Brightness', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Brightness', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='Brightness', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no adjusting brightness + results = construct_toy_data_photometric() + transform = dict(type='Brightness', magnitude=0., prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no adjusting brightness + results = construct_toy_data_photometric() + transform = dict(type='Brightness', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 with randomly sampled image. + results = construct_toy_data() + for _ in range(nb_rand_test): + magnitude = np.random.uniform() * np.random.choice([-1, 1]) + transform = dict( + type='Brightness', + magnitude=magnitude, + prob=1., + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + img = np.clip(np.random.uniform(0, 1, (256, 256, 3)) * 260, 0, + 255).astype(np.uint8) + results['img'] = img + results = pipeline(copy.deepcopy(results)) + np.testing.assert_allclose( + results['img'], + _adjust_brightness(img, 1 + magnitude), + rtol=0, + atol=1) + + +def test_sharpness(nb_rand_test=100): + + def _adjust_sharpness(img, factor): + # adjust the sharpness of image using + # PIL.ImageEnhance.Sharpness + from PIL.ImageEnhance import Sharpness + from PIL import Image + img = Image.fromarray(img) + sharpened_img = Sharpness(img).enhance(factor) + return np.asarray(sharpened_img) + + # test assertion for invalid type of magnitude + with pytest.raises(AssertionError): + transform = dict(type='Sharpness', magnitude=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Sharpness', magnitude=0.5, prob=100) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of random_negative_prob + with pytest.raises(AssertionError): + transform = dict( + type='Sharpness', magnitude=0.5, random_negative_prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when magnitude=0, therefore no adjusting sharpness + results = construct_toy_data_photometric() + transform = dict(type='Sharpness', magnitude=0., prob=1.) 
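+    # The enhancement factor passed to the PIL reference implementation is
+    # 1 + magnitude (see the random test below), so magnitude=0 corresponds
+    # to factor 1, i.e. no change in sharpness.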
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=0, therefore no adjusting sharpness + results = construct_toy_data_photometric() + transform = dict(type='Sharpness', magnitude=1., prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when prob=1 with randomly sampled image. + results = construct_toy_data() + for _ in range(nb_rand_test): + magnitude = np.random.uniform() * np.random.choice([-1, 1]) + transform = dict( + type='Sharpness', + magnitude=magnitude, + prob=1., + random_negative_prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + img = np.clip(np.random.uniform(0, 1, (256, 256, 3)) * 260, 0, + 255).astype(np.uint8) + results['img'] = img + results = pipeline(copy.deepcopy(results)) + np.testing.assert_allclose( + results['img'][1:-1, 1:-1], + _adjust_sharpness(img, 1 + magnitude)[1:-1, 1:-1], + rtol=0, + atol=1) + + +def test_cutout(): + + # test assertion for invalid type of shape + with pytest.raises(TypeError): + transform = dict(type='Cutout', shape=None) + build_from_cfg(transform, PIPELINES) + + # test assertion for invalid value of prob + with pytest.raises(AssertionError): + transform = dict(type='Cutout', shape=1, prob=100) + build_from_cfg(transform, PIPELINES) + + # test case when prob=0, therefore no cutout + results = construct_toy_data() + transform = dict(type='Cutout', shape=2, prob=0.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when shape=0, therefore no cutout + results = construct_toy_data() + transform = dict(type='Cutout', shape=0, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == results['ori_img']).all() + + # test case when shape=6, therefore the whole img has been cut + results = construct_toy_data() + transform = dict(type='Cutout', shape=6, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + assert (results['img'] == np.ones_like(results['ori_img']) * 128).all() + + # test case when shape is int + np.random.seed(0) + results = construct_toy_data() + transform = dict(type='Cutout', shape=1, prob=1.) + pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_cutout = np.array([[1, 2, 3, 4], [5, 128, 7, 8], [9, 10, 11, 12]], + dtype=np.uint8) + img_cutout = np.stack([img_cutout, img_cutout, img_cutout], axis=-1) + assert (results['img'] == img_cutout).all() + + # test case when shape is tuple + np.random.seed(0) + results = construct_toy_data() + transform = dict(type='Cutout', shape=(1, 2), pad_val=0, prob=1.) 
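+    # A tuple shape is interpreted as the (height, width) of the erased
+    # patch and pad_val as its fill value, so a 1x2 patch of zeros is
+    # expected in the array below.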
+ pipeline = build_from_cfg(transform, PIPELINES) + results = pipeline(results) + img_cutout = np.array([[1, 2, 3, 4], [5, 0, 0, 8], [9, 10, 11, 12]], + dtype=np.uint8) + img_cutout = np.stack([img_cutout, img_cutout, img_cutout], axis=-1) + assert (results['img'] == img_cutout).all() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_loading.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_loading.py new file mode 100644 index 0000000000..928fbc842e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_loading.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp + +import numpy as np + +from mmcls.datasets.pipelines import LoadImageFromFile + + +class TestLoading(object): + + @classmethod + def setup_class(cls): + cls.data_prefix = osp.join(osp.dirname(__file__), '../../data') + + def test_load_img(self): + results = dict( + img_prefix=self.data_prefix, img_info=dict(filename='color.jpg')) + transform = LoadImageFromFile() + results = transform(copy.deepcopy(results)) + assert results['filename'] == osp.join(self.data_prefix, 'color.jpg') + assert results['ori_filename'] == 'color.jpg' + assert results['img'].shape == (300, 400, 3) + assert results['img'].dtype == np.uint8 + assert results['img_shape'] == (300, 400, 3) + assert results['ori_shape'] == (300, 400, 3) + np.testing.assert_equal(results['img_norm_cfg']['mean'], + np.zeros(3, dtype=np.float32)) + assert repr(transform) == transform.__class__.__name__ + \ + "(to_float32=False, color_type='color', " + \ + "file_client_args={'backend': 'disk'})" + + # no img_prefix + results = dict( + img_prefix=None, img_info=dict(filename='tests/data/color.jpg')) + transform = LoadImageFromFile() + results = transform(copy.deepcopy(results)) + assert results['filename'] == 'tests/data/color.jpg' + assert results['img'].shape == (300, 400, 3) + + # to_float32 + transform = LoadImageFromFile(to_float32=True) + results = transform(copy.deepcopy(results)) + assert results['img'].dtype == np.float32 + + # gray image + results = dict( + img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg')) + transform = LoadImageFromFile() + results = transform(copy.deepcopy(results)) + assert results['img'].shape == (288, 512, 3) + assert results['img'].dtype == np.uint8 + + transform = LoadImageFromFile(color_type='unchanged') + results = transform(copy.deepcopy(results)) + assert results['img'].shape == (288, 512) + assert results['img'].dtype == np.uint8 + np.testing.assert_equal(results['img_norm_cfg']['mean'], + np.zeros(1, dtype=np.float32)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_transform.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_transform.py new file mode 100644 index 0000000000..9a26a8439d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_data/test_pipelines/test_transform.py @@ -0,0 +1,1188 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +import os.path as osp +import random + +import mmcv +import numpy as np +import pytest +import torch +import torchvision +from mmcv.utils import build_from_cfg +from numpy.testing import assert_array_almost_equal, assert_array_equal +from PIL import Image +from torchvision import transforms + +import mmcls.datasets.pipelines.transforms as mmcls_transforms +from mmcls.datasets.builder import PIPELINES +from mmcls.datasets.pipelines import Compose + + +def construct_toy_data(): + img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], + dtype=np.uint8) + img = np.stack([img, img, img], axis=-1) + results = dict() + # image + results['ori_img'] = img + results['img'] = copy.deepcopy(img) + results['ori_shape'] = img.shape + results['img_shape'] = img.shape + return results + + +def test_resize(): + # test assertion if size is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=-1) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple but the second value is smaller than 0 + # and the second value is not equal to -1 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=(224, -2)) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple but the first value is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=(-1, 224)) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple and len(size) < 2 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=(224, )) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple len(size) > 2 + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=(224, 224, 3)) + build_from_cfg(transform, PIPELINES) + + # test assertion when interpolation is invalid + with pytest.raises(AssertionError): + transform = dict(type='Resize', size=224, interpolation='2333') + build_from_cfg(transform, PIPELINES) + + # test repr + transform = dict(type='Resize', size=224) + resize_module = build_from_cfg(transform, PIPELINES) + assert isinstance(repr(resize_module), str) + + # read test image + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + def reset_results(results, original_img): + results['img'] = copy.deepcopy(original_img) + results['img2'] = copy.deepcopy(original_img) + results['img_shape'] = original_img.shape + results['ori_shape'] = original_img.shape + results['img_fields'] = ['img', 'img2'] + return results + + # test resize when size is int + transform = dict(type='Resize', size=224, interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + + # test resize when size is tuple and the second value is -1 + transform = dict(type='Resize', size=(224, -1), interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 298, 3) + + # test resize when size is tuple + transform = dict(type='Resize', size=(224, 
224), interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + + # test resize when resize_height != resize_width + transform = dict(type='Resize', size=(224, 256), interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 256, 3) + + # test resize when size is larger than img.shape + img_height, img_width, _ = original_img.shape + transform = dict( + type='Resize', + size=(img_height * 2, img_width * 2), + interpolation='bilinear') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (img_height * 2, img_width * 2, 3) + + # test resize with different backends + transform_cv2 = dict( + type='Resize', + size=(224, 256), + interpolation='bilinear', + backend='cv2') + transform_pil = dict( + type='Resize', + size=(224, 256), + interpolation='bilinear', + backend='pillow') + resize_module_cv2 = build_from_cfg(transform_cv2, PIPELINES) + resize_module_pil = build_from_cfg(transform_pil, PIPELINES) + results = reset_results(results, original_img) + results['img_fields'] = ['img'] + results_cv2 = resize_module_cv2(results) + results['img_fields'] = ['img2'] + results_pil = resize_module_pil(results) + assert np.allclose(results_cv2['img'], results_pil['img2'], atol=45) + + # compare results with torchvision + transform = dict(type='Resize', size=(224, 224), interpolation='area') + resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = resize_module(results) + resize_module = transforms.Resize( + size=(224, 224), interpolation=Image.BILINEAR) + pil_img = Image.fromarray(original_img) + resized_img = resize_module(pil_img) + resized_img = np.array(resized_img) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + assert np.allclose(results['img'], resized_img, atol=30) + + +def test_center_crop(): + # test assertion if size is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='CenterCrop', crop_size=-1) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple but one value is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='CenterCrop', crop_size=(224, -1)) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple and len(size) < 2 + with pytest.raises(AssertionError): + transform = dict(type='CenterCrop', crop_size=(224, )) + build_from_cfg(transform, PIPELINES) + + # test assertion if size is tuple len(size) > 2 + with pytest.raises(AssertionError): + transform = dict(type='CenterCrop', crop_size=(224, 224, 3)) + build_from_cfg(transform, PIPELINES) + + # test assertion if efficientnet is True and crop_size is tuple + with pytest.raises(AssertionError): + transform = dict( + type='CenterCrop', + crop_size=(224, 224), + efficientnet_style=True, + ) + build_from_cfg(transform, PIPELINES) + + # test assertion if efficientnet is True and interpolation is invalid + with pytest.raises(AssertionError): + transform = 
dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + interpolation='2333') + build_from_cfg(transform, PIPELINES) + + # test assertion if efficientnet is True and crop_padding is negative + with pytest.raises(AssertionError): + transform = dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + crop_padding=-1) + build_from_cfg(transform, PIPELINES) + + # test repr + transform = dict(type='CenterCrop', crop_size=224) + center_crop_module = build_from_cfg(transform, PIPELINES) + assert isinstance(repr(center_crop_module), str) + + # read test image + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + def reset_results(results, original_img): + results['img'] = copy.deepcopy(original_img) + results['img2'] = copy.deepcopy(original_img) + results['img_shape'] = original_img.shape + results['ori_shape'] = original_img.shape + return results + + # test CenterCrop when size is int + transform = dict(type='CenterCrop', crop_size=224) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + + # test CenterCrop when size is int and efficientnet_style is True + # and crop_padding=0 + transform = dict( + type='CenterCrop', + crop_size=224, + efficientnet_style=True, + crop_padding=0) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + results_img = copy.deepcopy(results['img']) + + short_edge = min(*results['ori_shape'][:2]) + transform = dict(type='CenterCrop', crop_size=short_edge) + baseline_center_crop_module = build_from_cfg(transform, PIPELINES) + transform = dict(type='Resize', size=224) + baseline_resize_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = baseline_center_crop_module(results) + results = baseline_resize_module(results) + assert np.equal(results['img'], results_img).all() + + # test CenterCrop when size is tuple + transform = dict(type='CenterCrop', crop_size=(224, 224)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (224, 224, 3) + + # test CenterCrop when crop_height != crop_width + transform = dict(type='CenterCrop', crop_size=(256, 224)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (256, 224, 3) + + # test CenterCrop when crop_size is equal to img.shape + img_height, img_width, _ = original_img.shape + transform = dict(type='CenterCrop', crop_size=(img_height, img_width)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert 
results['img_shape'] == (img_height, img_width, 3) + + # test CenterCrop when crop_size is larger than img.shape + transform = dict( + type='CenterCrop', crop_size=(img_height * 2, img_width * 2)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (img_height, img_width, 3) + + # test CenterCrop when crop_width is smaller than img_width + transform = dict(type='CenterCrop', crop_size=(img_height, img_width / 2)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (img_height, img_width / 2, 3) + + # test CenterCrop when crop_height is smaller than img_height + transform = dict(type='CenterCrop', crop_size=(img_height / 2, img_width)) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + assert np.equal(results['img'], results['img2']).all() + assert results['img_shape'] == (img_height / 2, img_width, 3) + + # compare results with torchvision + transform = dict(type='CenterCrop', crop_size=224) + center_crop_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = center_crop_module(results) + center_crop_module = transforms.CenterCrop(size=224) + pil_img = Image.fromarray(original_img) + cropped_img = center_crop_module(pil_img) + cropped_img = np.array(cropped_img) + assert np.equal(results['img'], results['img2']).all() + assert np.equal(results['img'], cropped_img).all() + + +def test_normalize(): + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + + # test repr + transform = dict(type='Normalize', **img_norm_cfg) + normalize_module = build_from_cfg(transform, PIPELINES) + assert isinstance(repr(normalize_module), str) + + # read data + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + norm_results = normalize_module(results) + assert np.equal(norm_results['img'], norm_results['img2']).all() + + # compare results with manual computation + mean = np.array(img_norm_cfg['mean']) + std = np.array(img_norm_cfg['std']) + normalized_img = (original_img[..., ::-1] - mean) / std + assert np.allclose(norm_results['img'], normalized_img) + + # compare results with torchvision + normalize_module = transforms.Normalize(mean=mean, std=std) + tensor_img = original_img[..., ::-1].copy() + tensor_img = torch.Tensor(tensor_img.transpose(2, 0, 1)) + normalized_img = normalize_module(tensor_img) + normalized_img = np.array(normalized_img).transpose(1, 2, 0) + assert np.equal(norm_results['img'], normalized_img).all() + + +def test_randomcrop(): + ori_img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + ori_img_pil = Image.open( + osp.join(osp.dirname(__file__), '../../data/color.jpg')) + seed = random.randint(0, 100) + + # test crop size is int + kwargs = dict(size=200, padding=0, pad_if_needed=True, fill=0) + random.seed(seed) + 
np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict(size=200, padding=0, pad_if_needed=True, pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + + # test __repr__() + print(composed_transform) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (200, 200, 3) + assert np.array(baseline).shape == (200, 200, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test crop size < image size + kwargs = dict(size=(200, 300), padding=0, pad_if_needed=True, fill=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict(size=(200, 300), padding=0, pad_if_needed=True, pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (200, 300, 3) + assert np.array(baseline).shape == (200, 300, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test crop size > image size + kwargs = dict(size=(600, 700), padding=0, pad_if_needed=True, fill=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict(size=(600, 700), padding=0, pad_if_needed=True, pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (600, 700, 3) + assert np.array(baseline).shape == (600, 700, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test crop size == image size + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + padding=0, + pad_if_needed=True, + fill=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + padding=0, + pad_if_needed=True, + pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + + assert np.array(img).shape == (img.shape[0], img.shape[1], 3) + assert np.array(baseline).shape == (img.shape[0], img.shape[1], 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, 
::-1]).nonzero()) + assert nonzero == nonzero_transform + assert_array_equal(ori_img, img) + assert_array_equal(np.array(baseline), np.array(ori_img_pil)) + + # test different padding mode + for mode in ['constant', 'edge', 'reflect', 'symmetric']: + kwargs = dict(size=(500, 600), padding=0, pad_if_needed=True, fill=0) + kwargs['padding_mode'] = mode + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + kwargs = dict( + size=(500, 600), padding=0, pad_if_needed=True, pad_val=0) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (500, 600, 3) + assert np.array(baseline).shape == (500, 600, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len( + (img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + +def test_randomresizedcrop(): + ori_img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + ori_img_pil = Image.open( + osp.join(osp.dirname(__file__), '../../data/color.jpg')) + + seed = random.randint(0, 100) + + # test when scale is not of kind (min, max) + with pytest.raises(ValueError): + kwargs = dict( + size=(200, 300), scale=(1.0, 0.08), ratio=(3. / 4., 4. / 3.)) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + composed_transform(results)['img'] + + # test when ratio is not of kind (min, max) + with pytest.raises(ValueError): + kwargs = dict( + size=(200, 300), scale=(0.08, 1.0), ratio=(4. / 3., 3. / 4.)) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + composed_transform(results)['img'] + + # test when efficientnet_style is True and crop_padding < 0 + with pytest.raises(AssertionError): + kwargs = dict(size=200, efficientnet_style=True, crop_padding=-1) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + composed_transform(results)['img'] + + # test crop size is int + kwargs = dict(size=200, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.)) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + + # test __repr__() + print(composed_transform) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (200, 200, 3) + assert np.array(baseline).shape == (200, 200, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test crop size < image size + kwargs = dict(size=(200, 300), scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.)) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (200, 300, 3) + assert np.array(baseline).shape == (200, 300, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test crop size < image size when efficientnet_style = True + kwargs = dict( + size=200, + scale=(0.08, 1.0), + ratio=(3. / 4., 4. / 3.), + efficientnet_style=True) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert img.shape == (200, 200, 3) + + # test crop size > image size + kwargs = dict(size=(600, 700), scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.)) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (600, 700, 3) + assert np.array(baseline).shape == (600, 700, 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test crop size < image size when efficientnet_style = True + kwargs = dict( + size=600, + scale=(0.08, 1.0), + ratio=(3. / 4., 4. 
/ 3.), + efficientnet_style=True) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert img.shape == (600, 600, 3) + + # test cropping the whole image + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + scale=(1.0, 2.0), + ratio=(1.0, 2.0)) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (ori_img.shape[0], ori_img.shape[1], 3) + assert np.array(baseline).shape == (ori_img.shape[0], ori_img.shape[1], 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + # assert_array_equal(ori_img, img) + # assert_array_equal(np.array(ori_img_pil), np.array(baseline)) + + # test central crop when in_ratio < min(ratio) + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + scale=(1.0, 2.0), + ratio=(2., 3.)) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (ori_img.shape[0], ori_img.shape[1], 3) + assert np.array(baseline).shape == (ori_img.shape[0], ori_img.shape[1], 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test central crop when in_ratio > max(ratio) + kwargs = dict( + size=(ori_img.shape[0], ori_img.shape[1]), + scale=(1.0, 2.0), + ratio=(3. / 4., 1)) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([torchvision.transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + baseline = composed_transform(ori_img_pil) + + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert np.array(img).shape == (ori_img.shape[0], ori_img.shape[1], 3) + assert np.array(baseline).shape == (ori_img.shape[0], ori_img.shape[1], 3) + nonzero = len((ori_img - np.array(ori_img_pil)[:, :, ::-1]).nonzero()) + nonzero_transform = len((img - np.array(baseline)[:, :, ::-1]).nonzero()) + assert nonzero == nonzero_transform + + # test central crop when max_attempts = 0 and efficientnet_style = True + kwargs = dict( + size=200, + scale=(0.08, 1.0), + ratio=(3. / 4., 4. 
/ 3.), + efficientnet_style=True, + max_attempts=0, + crop_padding=32) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + + kwargs = dict(crop_size=200, efficientnet_style=True, crop_padding=32) + resize_kwargs = dict(size=200) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.CenterCrop(**kwargs)]) + aug.extend([mmcls_transforms.Resize(**resize_kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + baseline = composed_transform(results)['img'] + + assert img.shape == baseline.shape + assert np.equal(img, baseline).all() + + # test central crop when max_attempts = 0 and efficientnet_style = True + kwargs = dict( + size=200, + scale=(0.08, 1.0), + ratio=(3. / 4., 4. / 3.), + efficientnet_style=True, + max_attempts=100, + min_covered=1) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + + kwargs = dict(crop_size=200, efficientnet_style=True, crop_padding=32) + resize_kwargs = dict(size=200) + random.seed(seed) + np.random.seed(seed) + aug = [] + aug.extend([mmcls_transforms.CenterCrop(**kwargs)]) + aug.extend([mmcls_transforms.Resize(**resize_kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + baseline = composed_transform(results)['img'] + + assert img.shape == baseline.shape + assert np.equal(img, baseline).all() + + # test different interpolation types + for mode in ['nearest', 'bilinear', 'bicubic', 'area', 'lanczos']: + kwargs = dict( + size=(600, 700), + scale=(0.08, 1.0), + ratio=(3. / 4., 4. 
/ 3.), + interpolation=mode) + aug = [] + aug.extend([mmcls_transforms.RandomResizedCrop(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = ori_img + img = composed_transform(results)['img'] + assert img.shape == (600, 700, 3) + + +def test_randomgrayscale(): + + # test rgb2gray, return the grayscale image with p>1 + in_img = np.random.rand(10, 10, 3).astype(np.float32) + kwargs = dict(gray_prob=2) + + aug = [] + aug.extend([mmcls_transforms.RandomGrayscale(**kwargs)]) + composed_transform = Compose(aug) + print(composed_transform) + results = dict() + results['img'] = in_img + img = composed_transform(results)['img'] + computed_gray = ( + in_img[:, :, 0] * 0.299 + in_img[:, :, 1] * 0.587 + + in_img[:, :, 2] * 0.114) + for i in range(img.shape[2]): + assert_array_almost_equal(img[:, :, i], computed_gray, decimal=4) + assert img.shape == (10, 10, 3) + + # test rgb2gray, return the original image with p=-1 + in_img = np.random.rand(10, 10, 3).astype(np.float32) + kwargs = dict(gray_prob=-1) + + aug = [] + aug.extend([mmcls_transforms.RandomGrayscale(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = in_img + img = composed_transform(results)['img'] + assert_array_equal(img, in_img) + assert img.shape == (10, 10, 3) + + # test image with one channel with our method + # and the function from torchvision + in_img = np.random.rand(10, 10, 1).astype(np.float32) + kwargs = dict(gray_prob=2) + + aug = [] + aug.extend([mmcls_transforms.RandomGrayscale(**kwargs)]) + composed_transform = Compose(aug) + results = dict() + results['img'] = in_img + img = composed_transform(results)['img'] + assert_array_equal(img, in_img) + assert img.shape == (10, 10, 1) + + in_img_pil = Image.fromarray(in_img[:, :, 0], mode='L') + kwargs = dict(p=2) + aug = [] + aug.extend([torchvision.transforms.RandomGrayscale(**kwargs)]) + composed_transform = Compose(aug) + img_pil = composed_transform(in_img_pil) + assert_array_equal(np.array(img_pil), np.array(in_img_pil)) + assert np.array(img_pil).shape == (10, 10) + + +def test_randomflip(): + # test assertion if flip probability is smaller than 0 + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', flip_prob=-1) + build_from_cfg(transform, PIPELINES) + + # test assertion if flip probability is larger than 1 + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', flip_prob=2) + build_from_cfg(transform, PIPELINES) + + # test assertion if direction is not horizontal and vertical + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', direction='random') + build_from_cfg(transform, PIPELINES) + + # test assertion if direction is not lowercase + with pytest.raises(AssertionError): + transform = dict(type='RandomFlip', direction='Horizontal') + build_from_cfg(transform, PIPELINES) + + # read test image + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + def reset_results(results, original_img): + results['img'] = copy.deepcopy(original_img) + results['img2'] = copy.deepcopy(original_img) + results['img_shape'] = original_img.shape + results['ori_shape'] = original_img.shape + return results + + # test RandomFlip when flip_prob is 0 + transform = dict(type='RandomFlip', flip_prob=0) + 
flip_module = build_from_cfg(transform, PIPELINES) + results = flip_module(results) + assert np.equal(results['img'], original_img).all() + assert np.equal(results['img'], results['img2']).all() + + # test RandomFlip when flip_prob is 1 + transform = dict(type='RandomFlip', flip_prob=1) + flip_module = build_from_cfg(transform, PIPELINES) + results = flip_module(results) + assert np.equal(results['img'], results['img2']).all() + + # compare horizontal flip with torchvision + transform = dict(type='RandomFlip', flip_prob=1, direction='horizontal') + flip_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = flip_module(results) + flip_module = transforms.RandomHorizontalFlip(p=1) + pil_img = Image.fromarray(original_img) + flipped_img = flip_module(pil_img) + flipped_img = np.array(flipped_img) + assert np.equal(results['img'], results['img2']).all() + assert np.equal(results['img'], flipped_img).all() + + # compare vertical flip with torchvision + transform = dict(type='RandomFlip', flip_prob=1, direction='vertical') + flip_module = build_from_cfg(transform, PIPELINES) + results = reset_results(results, original_img) + results = flip_module(results) + flip_module = transforms.RandomVerticalFlip(p=1) + pil_img = Image.fromarray(original_img) + flipped_img = flip_module(pil_img) + flipped_img = np.array(flipped_img) + assert np.equal(results['img'], results['img2']).all() + assert np.equal(results['img'], flipped_img).all() + + +def test_random_erasing(): + # test erase_prob assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', erase_prob=-1.) + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', erase_prob=1) + build_from_cfg(cfg, PIPELINES) + + # test area_ratio assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', min_area_ratio=-1.) + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', max_area_ratio=1) + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + # min_area_ratio should be smaller than max_area_ratio + cfg = dict( + type='RandomErasing', min_area_ratio=0.6, max_area_ratio=0.4) + build_from_cfg(cfg, PIPELINES) + + # test aspect_range assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', aspect_range='str') + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', aspect_range=-1) + build_from_cfg(cfg, PIPELINES) + with pytest.raises(AssertionError): + # In aspect_range (min, max), min should be smaller than max. + cfg = dict(type='RandomErasing', aspect_range=[1.6, 0.6]) + build_from_cfg(cfg, PIPELINES) + + # test mode assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', mode='unknown') + build_from_cfg(cfg, PIPELINES) + + # test fill_std assertion + with pytest.raises(AssertionError): + cfg = dict(type='RandomErasing', fill_std='unknown') + build_from_cfg(cfg, PIPELINES) + + # test implicit conversion of aspect_range + cfg = dict(type='RandomErasing', aspect_range=0.5) + random_erasing = build_from_cfg(cfg, PIPELINES) + assert random_erasing.aspect_range == (0.5, 2.) + + cfg = dict(type='RandomErasing', aspect_range=2.) + random_erasing = build_from_cfg(cfg, PIPELINES) + assert random_erasing.aspect_range == (0.5, 2.) 
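+    # Both scalar values above are expected to expand to the same symmetric
+    # interval (min(r, 1 / r), max(r, 1 / r)), hence (0.5, 2.) in each case.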
+ + # test implicit conversion of fill_color + cfg = dict(type='RandomErasing', fill_color=15) + random_erasing = build_from_cfg(cfg, PIPELINES) + assert random_erasing.fill_color == [15, 15, 15] + + # test implicit conversion of fill_std + cfg = dict(type='RandomErasing', fill_std=0.5) + random_erasing = build_from_cfg(cfg, PIPELINES) + assert random_erasing.fill_std == [0.5, 0.5, 0.5] + + # test when erase_prob=0. + results = construct_toy_data() + cfg = dict( + type='RandomErasing', + erase_prob=0., + mode='const', + fill_color=(255, 255, 255)) + random_erasing = build_from_cfg(cfg, PIPELINES) + results = random_erasing(results) + np.testing.assert_array_equal(results['img'], results['ori_img']) + + # test mode 'const' + random.seed(0) + np.random.seed(0) + results = construct_toy_data() + cfg = dict( + type='RandomErasing', + erase_prob=1., + mode='const', + fill_color=(255, 255, 255)) + random_erasing = build_from_cfg(cfg, PIPELINES) + results = random_erasing(results) + + expect_out = np.array([[1, 255, 3, 4], [5, 255, 7, 8], [9, 10, 11, 12]], + dtype=np.uint8) + expect_out = np.stack([expect_out] * 3, axis=-1) + np.testing.assert_array_equal(results['img'], expect_out) + + # test mode 'rand' with normal distribution + random.seed(0) + np.random.seed(0) + results = construct_toy_data() + cfg = dict(type='RandomErasing', erase_prob=1., mode='rand') + random_erasing = build_from_cfg(cfg, PIPELINES) + results = random_erasing(results) + + expect_out = results['ori_img'] + expect_out[:2, 1] = [[159, 98, 76], [14, 69, 122]] + np.testing.assert_array_equal(results['img'], expect_out) + + # test mode 'rand' with uniform distribution + random.seed(0) + np.random.seed(0) + results = construct_toy_data() + cfg = dict( + type='RandomErasing', + erase_prob=1., + mode='rand', + fill_std=(10, 255, 0)) + random_erasing = build_from_cfg(cfg, PIPELINES) + results = random_erasing(results) + + expect_out = results['ori_img'] + expect_out[:2, 1] = [[113, 255, 128], [126, 83, 128]] + np.testing.assert_array_equal(results['img'], expect_out) + + +def test_color_jitter(): + # read test image + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + def reset_results(results, original_img): + results['img'] = copy.deepcopy(original_img) + results['img2'] = copy.deepcopy(original_img) + results['img_shape'] = original_img.shape + results['ori_shape'] = original_img.shape + return results + + transform = dict( + type='ColorJitter', brightness=0., contrast=0., saturation=0.) 
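+    # With brightness, contrast and saturation all set to 0, ColorJitter
+    # should leave the image unchanged, as the equality checks below verify.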
+ colorjitter_module = build_from_cfg(transform, PIPELINES) + results = colorjitter_module(results) + assert np.equal(results['img'], original_img).all() + assert np.equal(results['img'], results['img2']).all() + + results = reset_results(results, original_img) + transform = dict( + type='ColorJitter', brightness=0.3, contrast=0.3, saturation=0.3) + colorjitter_module = build_from_cfg(transform, PIPELINES) + results = colorjitter_module(results) + assert not np.equal(results['img'], original_img).all() + + +def test_lighting(): + # test assertion if eigval or eigvec is wrong type or length + with pytest.raises(AssertionError): + transform = dict(type='Lighting', eigval=1, eigvec=[[1, 0, 0]]) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict(type='Lighting', eigval=[1], eigvec=[1, 0, 0]) + build_from_cfg(transform, PIPELINES) + with pytest.raises(AssertionError): + transform = dict( + type='Lighting', eigval=[1, 2], eigvec=[[1, 0, 0], [0, 1]]) + build_from_cfg(transform, PIPELINES) + + # read test image + results = dict() + img = mmcv.imread( + osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color') + original_img = copy.deepcopy(img) + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + + def reset_results(results, original_img): + results['img'] = copy.deepcopy(original_img) + results['img2'] = copy.deepcopy(original_img) + results['img_shape'] = original_img.shape + results['ori_shape'] = original_img.shape + return results + + eigval = [0.2175, 0.0188, 0.0045] + eigvec = [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]] + transform = dict(type='Lighting', eigval=eigval, eigvec=eigvec) + lightening_module = build_from_cfg(transform, PIPELINES) + results = lightening_module(results) + assert not np.equal(results['img'], results['img2']).all() + assert results['img'].dtype == float + assert results['img2'].dtype == float + + results = reset_results(results, original_img) + transform = dict( + type='Lighting', + eigval=eigval, + eigvec=eigvec, + alphastd=0., + to_rgb=False) + lightening_module = build_from_cfg(transform, PIPELINES) + results = lightening_module(results) + assert np.equal(results['img'], original_img).all() + assert np.equal(results['img'], results['img2']).all() + assert results['img'].dtype == float + assert results['img2'].dtype == float + + +def test_albu_transform(): + results = dict( + img_prefix=osp.join(osp.dirname(__file__), '../../data'), + img_info=dict(filename='color.jpg')) + + # Define simple pipeline + load = dict(type='LoadImageFromFile') + load = build_from_cfg(load, PIPELINES) + + albu_transform = dict( + type='Albu', transforms=[dict(type='ChannelShuffle', p=1)]) + albu_transform = build_from_cfg(albu_transform, PIPELINES) + + normalize = dict(type='Normalize', mean=[0] * 3, std=[0] * 3, to_rgb=True) + normalize = build_from_cfg(normalize, PIPELINES) + + # Execute transforms + results = load(results) + results = albu_transform(results) + results = normalize(results) + + assert results['img'].dtype == np.float32 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_downstream/test_mmdet_inference.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_downstream/test_mmdet_inference.py new file mode 100644 index 0000000000..ba43113680 --- /dev/null +++ 
b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_downstream/test_mmdet_inference.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmcv import Config +from mmdet.apis import inference_detector +from mmdet.models import build_detector + +from mmcls.models import (MobileNetV2, MobileNetV3, RegNet, ResNeSt, ResNet, + ResNeXt, SEResNet, SEResNeXt, SwinTransformer) + +backbone_configs = dict( + mobilenetv2=dict( + backbone=dict( + type='mmcls.MobileNetV2', + widen_factor=1.0, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=(4, 7)), + out_channels=[96, 1280]), + mobilenetv3=dict( + backbone=dict( + type='mmcls.MobileNetV3', + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=range(7, 12)), + out_channels=[48, 48, 96, 96, 96]), + regnet=dict( + backbone=dict(type='mmcls.RegNet', arch='regnetx_400mf'), + out_channels=384), + resnext=dict( + backbone=dict( + type='mmcls.ResNeXt', depth=50, groups=32, width_per_group=4), + out_channels=2048), + resnet=dict( + backbone=dict(type='mmcls.ResNet', depth=50), out_channels=2048), + seresnet=dict( + backbone=dict(type='mmcls.SEResNet', depth=50), out_channels=2048), + seresnext=dict( + backbone=dict( + type='mmcls.SEResNeXt', depth=50, groups=32, width_per_group=4), + out_channels=2048), + resnest=dict( + backbone=dict( + type='mmcls.ResNeSt', + depth=50, + radix=2, + reduction_factor=4, + out_indices=(0, 1, 2, 3)), + out_channels=[256, 512, 1024, 2048]), + swin=dict( + backbone=dict( + type='mmcls.SwinTransformer', + arch='small', + drop_path_rate=0.2, + img_size=800, + out_indices=(2, 3), + auto_pad=True), + out_channels=[384, 768])) + +module_mapping = { + 'mobilenetv2': MobileNetV2, + 'mobilenetv3': MobileNetV3, + 'regnet': RegNet, + 'resnext': ResNeXt, + 'resnet': ResNet, + 'seresnext': SEResNeXt, + 'seresnet': SEResNet, + 'resnest': ResNeSt, + 'swin': SwinTransformer +} + + +def test_mmdet_inference(): + config_path = './tests/data/retinanet.py' + rng = np.random.RandomState(0) + img1 = rng.rand(100, 100, 3) + + for module_name, backbone_config in backbone_configs.items(): + config = Config.fromfile(config_path) + config.model.backbone = backbone_config['backbone'] + out_channels = backbone_config['out_channels'] + if isinstance(out_channels, int): + config.model.neck = None + config.model.bbox_head.in_channels = out_channels + anchor_generator = config.model.bbox_head.anchor_generator + anchor_generator.strides = anchor_generator.strides[:1] + else: + config.model.neck.in_channels = out_channels + + model = build_detector(config.model) + module = module_mapping[module_name] + assert isinstance(model.backbone, module) + + model.cfg = config + + model.eval() + print(module_name) + result = inference_detector(model, img1) + assert len(result) == config.num_classes diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_losses.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_losses.py new file mode 100644 index 0000000000..cb643d68fc --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_losses.py @@ -0,0 +1,303 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
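The new file that follows tests the classification losses created through build_loss. As a rough, illustrative sketch of the interface those tests exercise (a config dict goes in, a callable loss module comes out; names and arguments are taken from the assertions below, not from any other source):

import torch
from mmcls.models import build_loss

# Build a loss module from a config dict, then call it on logits and labels.
loss_cfg = dict(type='CrossEntropyLoss', reduction='mean', loss_weight=1.0)
criterion = build_loss(loss_cfg)

cls_score = torch.tensor([[1.0, -1.0], [0.5, 0.5]])  # raw logits
label = torch.tensor([0, 1])                         # class indices
loss = criterion(cls_score, label)                   # scalar tensor (mean reduction)
# As in the tests, an optional per-sample weight tensor rescales each sample's contribution:
loss_weighted = criterion(cls_score, label, weight=torch.tensor([0.6, 0.4]))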
+import pytest +import torch + +from mmcls.models import build_loss + + +def test_asymmetric_loss(): + # test asymmetric_loss + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([[1, 0, 1], [0, 1, 0]]) + weight = torch.tensor([0.5, 0.5]) + + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=1.0, + gamma_neg=4.0, + clip=0.05, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(3.80845 / 3)) + + # test asymmetric_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(3.80845 / 6)) + + # test asymmetric_loss without clip + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=1.0, + gamma_neg=4.0, + clip=None, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(5.1186 / 3)) + + +def test_cross_entropy_loss(): + with pytest.raises(AssertionError): + # use_sigmoid and use_soft could not be set simultaneously + loss_cfg = dict( + type='CrossEntropyLoss', use_sigmoid=True, use_soft=True) + loss = build_loss(loss_cfg) + + # test ce_loss + cls_score = torch.Tensor([[-1000, 1000], [100, -100]]) + label = torch.Tensor([0, 1]).long() + class_weight = [0.3, 0.7] # class 0 : 0.3, class 1 : 0.7 + weight = torch.tensor([0.6, 0.4]) + + # test ce_loss without class weight + loss_cfg = dict(type='CrossEntropyLoss', reduction='mean', loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(1100.)) + # test ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(640.)) + + # test ce_loss with class weight + loss_cfg = dict( + type='CrossEntropyLoss', + reduction='mean', + loss_weight=1.0, + class_weight=class_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(370.)) + # test ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(208.)) + + # test bce_loss + cls_score = torch.Tensor([[-200, 100], [500, -1000], [300, -300]]) + label = torch.Tensor([[1, 0], [0, 1], [1, 0]]) + weight = torch.Tensor([0.6, 0.4, 0.5]) + class_weight = [0.1, 0.9] # class 0: 0.1, class 1: 0.9 + + # test bce_loss without class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(300.)) + # test ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(130.)) + + # test bce_loss with class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0, + class_weight=class_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(176.667)) + # test bce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(74.333)) + + # test soft_ce_loss + cls_score = torch.Tensor([[-1000, 1000], [100, -100]]) + label = torch.Tensor([[1.0, 0.0], [0.0, 1.0]]) + class_weight = [0.3, 0.7] # class 0 : 0.3, class 1 : 0.7 + weight = torch.tensor([0.6, 0.4]) + + # test soft_ce_loss without class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_soft=True, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(1100.)) + # test soft_ce_loss with weight + assert 
torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(640.)) + + # test soft_ce_loss with class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_soft=True, + reduction='mean', + loss_weight=1.0, + class_weight=class_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(370.)) + # test soft_ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(208.)) + + +def test_focal_loss(): + # test focal_loss + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([[1, 0, 1], [0, 1, 0]]) + weight = torch.tensor([0.5, 0.5]) + + loss_cfg = dict( + type='FocalLoss', + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(0.8522)) + # test focal_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(0.8522 / 2)) + + +def test_label_smooth_loss(): + # test label_smooth_val assertion + with pytest.raises(AssertionError): + loss_cfg = dict(type='LabelSmoothLoss', label_smooth_val=1.0) + build_loss(loss_cfg) + + with pytest.raises(AssertionError): + loss_cfg = dict(type='LabelSmoothLoss', label_smooth_val='str') + build_loss(loss_cfg) + + # test reduction assertion + with pytest.raises(AssertionError): + loss_cfg = dict( + type='LabelSmoothLoss', label_smooth_val=0.1, reduction='unknown') + build_loss(loss_cfg) + + # test mode assertion + with pytest.raises(AssertionError): + loss_cfg = dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='unknown') + build_loss(loss_cfg) + + # test original mode label smooth loss + cls_score = torch.tensor([[1., -1.]]) + label = torch.tensor([0]) + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + correct = 0.2269 # from timm + assert loss(cls_score, label) - correct <= 0.0001 + + # test classy_vision mode label smooth loss + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='classy_vision', + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + correct = 0.2178 # from ClassyVision + assert loss(cls_score, label) - correct <= 0.0001 + + # test multi_label mode label smooth loss + cls_score = torch.tensor([[1., -1., 1]]) + label = torch.tensor([[1, 0, 1]]) + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='multi_label', + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + smooth_label = torch.tensor([[0.9, 0.1, 0.9]]) + correct = torch.binary_cross_entropy_with_logits(cls_score, + smooth_label).mean() + assert torch.allclose(loss(cls_score, label), correct) + + # test label linear combination smooth loss + cls_score = torch.tensor([[1., -1., 0.]]) + label1 = torch.tensor([[1., 0., 0.]]) + label2 = torch.tensor([[0., 0., 1.]]) + label_mix = label1 * 0.6 + label2 * 0.4 + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + reduction='mean', + num_classes=3, + loss_weight=1.0) + loss = build_loss(loss_cfg) + smooth_label1 = loss.original_smooth_label(label1) + smooth_label2 = loss.original_smooth_label(label2) + label_smooth_mix = smooth_label1 * 0.6 + smooth_label2 * 0.4 + correct = (-torch.log_softmax(cls_score, -1) * label_smooth_mix).sum() + + assert loss(cls_score, label_mix) - correct <= 0.0001 + + # test label smooth loss with weight + cls_score = torch.tensor([[1., 
-1.], [1., -1.]]) + label = torch.tensor([0, 1]) + weight = torch.tensor([0.5, 0.5]) + + loss_cfg = dict( + type='LabelSmoothLoss', + reduction='mean', + label_smooth_val=0.1, + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose( + loss(cls_score, label, weight=weight), + loss(cls_score, label) / 2) + + +# migrate from mmdetection with modifications +def test_seesaw_loss(): + # only softmax version of Seesaw Loss is implemented + with pytest.raises(AssertionError): + loss_cfg = dict(type='SeesawLoss', use_sigmoid=True, loss_weight=1.0) + build_loss(loss_cfg) + + # test that cls_score.size(-1) == num_classes + loss_cls_cfg = dict( + type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + # the length of fake_pred should be num_classe = 4 + with pytest.raises(AssertionError): + fake_pred = torch.Tensor([[-100, 100, -100]]) + fake_label = torch.Tensor([1]).long() + loss_cls(fake_pred, fake_label) + # the length of fake_pred should be num_classes + 2 = 4 + with pytest.raises(AssertionError): + fake_pred = torch.Tensor([[-100, 100, -100, 100]]) + fake_label = torch.Tensor([1]).long() + loss_cls(fake_pred, fake_label) + + # test the calculation without p and q + loss_cls_cfg = dict( + type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + fake_pred = torch.Tensor([[-100, 100]]) + fake_label = torch.Tensor([1]).long() + loss = loss_cls(fake_pred, fake_label) + assert torch.allclose(loss, torch.tensor(0.)) + + # test the calculation with p and without q + loss_cls_cfg = dict( + type='SeesawLoss', p=1.0, q=0.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + fake_pred = torch.Tensor([[-100, 100]]) + fake_label = torch.Tensor([0]).long() + loss_cls.cum_samples[0] = torch.exp(torch.Tensor([20])) + loss = loss_cls(fake_pred, fake_label) + assert torch.allclose(loss, torch.tensor(180.)) + + # test the calculation with q and without p + loss_cls_cfg = dict( + type='SeesawLoss', p=0.0, q=1.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + fake_pred = torch.Tensor([[-100, 100]]) + fake_label = torch.Tensor([0]).long() + loss = loss_cls(fake_pred, fake_label) + assert torch.allclose(loss, torch.tensor(200.) + torch.tensor(100.).log()) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_metrics.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_metrics.py new file mode 100644 index 0000000000..8906af8a98 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_metrics/test_metrics.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
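The new file that follows tests the multi-label evaluation helpers mAP and average_performance from mmcls.core. A minimal sketch of how they are called, assuming an installed mmcls (both take prediction scores and binary targets of the same shape; the exact names of the six values returned by average_performance are not spelled out here):

import torch
from mmcls.core import average_performance, mAP

pred = torch.Tensor([[0.9, 0.8, 0.3, 0.2],
                     [0.1, 0.7, 0.6, 0.1],
                     [0.2, 0.3, 0.8, 0.9]])
target = torch.Tensor([[1, 1, 0, 0],
                       [0, 1, 1, 0],
                       [0, 0, 1, 1]])

print(mAP(pred, target))                            # mean average precision, in percent
print(average_performance(pred, target, thr=0.5))   # tuple of six aggregate scores
print(average_performance(pred, target, k=2))       # top-k variant, as used in the tests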
+import pytest +import torch + +from mmcls.core import average_performance, mAP + + +def test_mAP(): + target = torch.Tensor([[1, 1, 0, -1], [1, 1, 0, -1], [0, -1, 1, -1], + [0, 1, 0, -1]]) + pred = torch.Tensor([[0.9, 0.8, 0.3, 0.2], [0.1, 0.2, 0.2, 0.1], + [0.7, 0.5, 0.9, 0.3], [0.8, 0.1, 0.1, 0.2]]) + + # target and pred should both be np.ndarray or torch.Tensor + with pytest.raises(TypeError): + target_list = target.tolist() + _ = mAP(pred, target_list) + + # target and pred should be in the same shape + with pytest.raises(AssertionError): + target_shorter = target[:-1] + _ = mAP(pred, target_shorter) + + assert mAP(pred, target) == pytest.approx(68.75, rel=1e-2) + + target_no_difficult = torch.Tensor([[1, 1, 0, 0], [0, 1, 0, 0], + [0, 0, 1, 0], [1, 0, 0, 0]]) + assert mAP(pred, target_no_difficult) == pytest.approx(70.83, rel=1e-2) + + +def test_average_performance(): + target = torch.Tensor([[1, 1, 0, -1], [1, 1, 0, -1], [0, -1, 1, -1], + [0, 1, 0, -1], [0, 1, 0, -1]]) + pred = torch.Tensor([[0.9, 0.8, 0.3, 0.2], [0.1, 0.2, 0.2, 0.1], + [0.7, 0.5, 0.9, 0.3], [0.8, 0.1, 0.1, 0.2], + [0.8, 0.1, 0.1, 0.2]]) + + # target and pred should both be np.ndarray or torch.Tensor + with pytest.raises(TypeError): + target_list = target.tolist() + _ = average_performance(pred, target_list) + + # target and pred should be in the same shape + with pytest.raises(AssertionError): + target_shorter = target[:-1] + _ = average_performance(pred, target_shorter) + + assert average_performance(pred, target) == average_performance( + pred, target, thr=0.5) + assert average_performance(pred, target, thr=0.5, k=2) \ + == average_performance(pred, target, thr=0.5) + assert average_performance( + pred, target, thr=0.3) == pytest.approx( + (31.25, 43.75, 36.46, 33.33, 42.86, 37.50), rel=1e-2) + assert average_performance( + pred, target, k=2) == pytest.approx( + (43.75, 50.00, 46.67, 40.00, 57.14, 47.06), rel=1e-2) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v2.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v2.py new file mode 100644 index 0000000000..9ea7557026 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v2.py @@ -0,0 +1,259 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
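The new file that follows tests the MobileNetV2 backbone. As an illustrative sketch of the interface it exercises (constructor arguments and the expected output shape are taken from the assertions below):

import torch
from mmcls.models.backbones import MobileNetV2

# out_indices selects which of the 8 stages to return; index 7 is the final 1280-channel map.
model = MobileNetV2(widen_factor=1.0, out_indices=(7, ))
model.init_weights()
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
assert feats[0].shape == (1, 1280, 7, 7)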
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import MobileNetV2 +from mmcls.models.backbones.mobilenet_v2 import InvertedResidual + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (InvertedResidual, )): + return True + return False + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_mobilenetv2_invertedresidual(): + + with pytest.raises(AssertionError): + # stride must be in [1, 2] + InvertedResidual(16, 24, stride=3, expand_ratio=6) + + # Test InvertedResidual with checkpoint forward, stride=1 + block = InvertedResidual(16, 24, stride=1, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + # Test InvertedResidual with expand_ratio=1 + block = InvertedResidual(16, 16, stride=1, expand_ratio=1) + assert len(block.conv) == 2 + + # Test InvertedResidual with use_res_connect + block = InvertedResidual(16, 16, stride=1, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert block.use_res_connect is True + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual with checkpoint forward, stride=2 + block = InvertedResidual(16, 24, stride=2, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 28, 28)) + + # Test InvertedResidual with checkpoint forward + block = InvertedResidual(16, 24, stride=1, expand_ratio=6, with_cp=True) + assert block.with_cp + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + # Test InvertedResidual with act_cfg=dict(type='ReLU') + block = InvertedResidual( + 16, 24, stride=1, expand_ratio=6, act_cfg=dict(type='ReLU')) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + +def test_mobilenetv2_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = MobileNetV2() + model.init_weights(pretrained=0) + + with pytest.raises(ValueError): + # frozen_stages must in range(-1, 8) + MobileNetV2(frozen_stages=8) + + with pytest.raises(ValueError): + # out_indices in range(0, 8) + MobileNetV2(out_indices=[8]) + + # Test MobileNetV2 with first stage frozen + frozen_stages = 1 + model = MobileNetV2(frozen_stages=frozen_stages) + model.init_weights() + model.train() + + for mod in model.conv1.modules(): + for param in mod.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test MobileNetV2 with norm_eval=True + model = MobileNetV2(norm_eval=True) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), False) + + # Test MobileNetV2 forward with widen_factor=1.0 + model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8)) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = 
torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 8 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + assert feat[7].shape == torch.Size((1, 1280, 7, 7)) + + # Test MobileNetV2 forward with widen_factor=0.5 + model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 8, 112, 112)) + assert feat[1].shape == torch.Size((1, 16, 56, 56)) + assert feat[2].shape == torch.Size((1, 16, 28, 28)) + assert feat[3].shape == torch.Size((1, 32, 14, 14)) + assert feat[4].shape == torch.Size((1, 48, 14, 14)) + assert feat[5].shape == torch.Size((1, 80, 7, 7)) + assert feat[6].shape == torch.Size((1, 160, 7, 7)) + + # Test MobileNetV2 forward with widen_factor=2.0 + model = MobileNetV2(widen_factor=2.0) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 2560, 7, 7)) + + # Test MobileNetV2 forward with out_indices=None + model = MobileNetV2(widen_factor=1.0) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 1280, 7, 7)) + + # Test MobileNetV2 forward with dict(type='ReLU') + model = MobileNetV2( + widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + + # Test MobileNetV2 with BatchNorm forward + model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + + # Test MobileNetV2 with GroupNorm forward + model = MobileNetV2( + widen_factor=1.0, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=range(0, 7)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + 
assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + + # Test MobileNetV2 with layers 1, 3, 5 out forward + model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 32, 28, 28)) + assert feat[2].shape == torch.Size((1, 96, 14, 14)) + + # Test MobileNetV2 with checkpoint forward + model = MobileNetV2( + widen_factor=1.0, with_cp=True, out_indices=range(0, 7)) + for m in model.modules(): + if is_block(m): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v3.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v3.py new file mode 100644 index 0000000000..b122dbd769 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_mobilenet_v3.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
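The new file that follows tests the MobileNetV3 backbone. A minimal usage sketch of what the tests exercise (the 'small' arch is used here; the single 576-channel output under the default out_indices comes from the assertions below):

import torch
from mmcls.models.backbones import MobileNetV3

model = MobileNetV3(arch='small')  # arch must be 'small' or 'large'
model.init_weights()
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
# With the default out_indices the 'small' arch returns one (1, 576, 7, 7) feature map.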
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import MobileNetV3 +from mmcls.models.utils import InvertedResidual + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_mobilenetv3_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = MobileNetV3() + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # arch must in [small, large] + MobileNetV3(arch='others') + + with pytest.raises(ValueError): + # frozen_stages must less than 13 when arch is small + MobileNetV3(arch='small', frozen_stages=13) + + with pytest.raises(ValueError): + # frozen_stages must less than 17 when arch is large + MobileNetV3(arch='large', frozen_stages=17) + + with pytest.raises(ValueError): + # max out_indices must less than 13 when arch is small + MobileNetV3(arch='small', out_indices=(13, )) + + with pytest.raises(ValueError): + # max out_indices must less than 17 when arch is large + MobileNetV3(arch='large', out_indices=(17, )) + + # Test MobileNetV3 + model = MobileNetV3() + model.init_weights() + model.train() + + # Test MobileNetV3 with first stage frozen + frozen_stages = 1 + model = MobileNetV3(frozen_stages=frozen_stages) + model.init_weights() + model.train() + for i in range(0, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test MobileNetV3 with norm eval + model = MobileNetV3(norm_eval=True, out_indices=range(0, 12)) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test MobileNetV3 forward with small arch + model = MobileNetV3(out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 13 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + assert feat[1].shape == torch.Size([1, 16, 56, 56]) + assert feat[2].shape == torch.Size([1, 24, 28, 28]) + assert feat[3].shape == torch.Size([1, 24, 28, 28]) + assert feat[4].shape == torch.Size([1, 40, 14, 14]) + assert feat[5].shape == torch.Size([1, 40, 14, 14]) + assert feat[6].shape == torch.Size([1, 40, 14, 14]) + assert feat[7].shape == torch.Size([1, 48, 14, 14]) + assert feat[8].shape == torch.Size([1, 48, 14, 14]) + assert feat[9].shape == torch.Size([1, 96, 7, 7]) + assert feat[10].shape == torch.Size([1, 96, 7, 7]) + assert feat[11].shape == torch.Size([1, 96, 7, 7]) + assert feat[12].shape == torch.Size([1, 576, 7, 7]) + + # Test MobileNetV3 forward with small arch and GroupNorm + model = MobileNetV3( + out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 13 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + assert feat[1].shape == torch.Size([1, 16, 56, 
56]) + assert feat[2].shape == torch.Size([1, 24, 28, 28]) + assert feat[3].shape == torch.Size([1, 24, 28, 28]) + assert feat[4].shape == torch.Size([1, 40, 14, 14]) + assert feat[5].shape == torch.Size([1, 40, 14, 14]) + assert feat[6].shape == torch.Size([1, 40, 14, 14]) + assert feat[7].shape == torch.Size([1, 48, 14, 14]) + assert feat[8].shape == torch.Size([1, 48, 14, 14]) + assert feat[9].shape == torch.Size([1, 96, 7, 7]) + assert feat[10].shape == torch.Size([1, 96, 7, 7]) + assert feat[11].shape == torch.Size([1, 96, 7, 7]) + assert feat[12].shape == torch.Size([1, 576, 7, 7]) + + # Test MobileNetV3 forward with large arch + model = MobileNetV3( + arch='large', + out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 17 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + assert feat[1].shape == torch.Size([1, 16, 112, 112]) + assert feat[2].shape == torch.Size([1, 24, 56, 56]) + assert feat[3].shape == torch.Size([1, 24, 56, 56]) + assert feat[4].shape == torch.Size([1, 40, 28, 28]) + assert feat[5].shape == torch.Size([1, 40, 28, 28]) + assert feat[6].shape == torch.Size([1, 40, 28, 28]) + assert feat[7].shape == torch.Size([1, 80, 14, 14]) + assert feat[8].shape == torch.Size([1, 80, 14, 14]) + assert feat[9].shape == torch.Size([1, 80, 14, 14]) + assert feat[10].shape == torch.Size([1, 80, 14, 14]) + assert feat[11].shape == torch.Size([1, 112, 14, 14]) + assert feat[12].shape == torch.Size([1, 112, 14, 14]) + assert feat[13].shape == torch.Size([1, 160, 7, 7]) + assert feat[14].shape == torch.Size([1, 160, 7, 7]) + assert feat[15].shape == torch.Size([1, 160, 7, 7]) + assert feat[16].shape == torch.Size([1, 960, 7, 7]) + + # Test MobileNetV3 forward with large arch + model = MobileNetV3(arch='large', out_indices=(0, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + + # Test MobileNetV3 with checkpoint forward + model = MobileNetV3(with_cp=True) + for m in model.modules(): + if isinstance(m, InvertedResidual): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 576, 7, 7]) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_regnet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_regnet.py new file mode 100644 index 0000000000..67de1c8733 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_regnet.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
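The new file that follows tests the RegNet backbone, parameterized over several RegNetX variants. A short sketch of the interface (the architecture name and the per-stage widths are taken from regnet_test_data below):

import torch
from mmcls.models.backbones import RegNet

# The arch may be a predefined name or an explicit parameter dict
# (w0, wa, wm, group_w, depth, bot_mul), as the parameterized tests show.
model = RegNet('regnetx_400mf', out_indices=(0, 1, 2, 3))
model.init_weights()
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
# Stage widths for regnetx_400mf are (32, 64, 160, 384) at 56, 28, 14 and 7 pixels.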
+import pytest +import torch + +from mmcls.models.backbones import RegNet + +regnet_test_data = [ + ('regnetx_400mf', + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, + bot_mul=1.0), [32, 64, 160, 384]), + ('regnetx_800mf', + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, + bot_mul=1.0), [64, 128, 288, 672]), + ('regnetx_1.6gf', + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, + bot_mul=1.0), [72, 168, 408, 912]), + ('regnetx_3.2gf', + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, + bot_mul=1.0), [96, 192, 432, 1008]), + ('regnetx_4.0gf', + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, + bot_mul=1.0), [80, 240, 560, 1360]), + ('regnetx_6.4gf', + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, + bot_mul=1.0), [168, 392, 784, 1624]), + ('regnetx_8.0gf', + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, + bot_mul=1.0), [80, 240, 720, 1920]), + ('regnetx_12gf', + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, + bot_mul=1.0), [224, 448, 896, 2240]), +] + + +@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data) +def test_regnet_backbone(arch_name, arch, out_channels): + with pytest.raises(AssertionError): + # ResNeXt depth should be in [50, 101, 152] + RegNet(arch_name + '233') + + # output the last feature map + model = RegNet(arch_name) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == (1, out_channels[-1], 7, 7) + + # output feature map of all stages + model = RegNet(arch_name, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, out_channels[0], 56, 56) + assert feat[1].shape == (1, out_channels[1], 28, 28) + assert feat[2].shape == (1, out_channels[2], 14, 14) + assert feat[3].shape == (1, out_channels[3], 7, 7) + + +@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data) +def test_custom_arch(arch_name, arch, out_channels): + # output the last feature map + model = RegNet(arch) + model.init_weights() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == (1, out_channels[-1], 7, 7) + + # output feature map of all stages + model = RegNet(arch, out_indices=(0, 1, 2, 3)) + model.init_weights() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, out_channels[0], 56, 56) + assert feat[1].shape == (1, out_channels[1], 28, 28) + assert feat[2].shape == (1, out_channels[2], 14, 14) + assert feat[3].shape == (1, out_channels[3], 7, 7) + + +def test_exception(): + # arch must be a str or dict + with pytest.raises(TypeError): + _ = RegNet(50) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_repvgg.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_repvgg.py new file mode 100644 index 0000000000..0bbf6f31f1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_repvgg.py @@ -0,0 +1,293 @@ +import os +import tempfile + +import pytest +import torch +from mmcv.runner import load_checkpoint, save_checkpoint +from torch import nn +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from 
mmcls.models.backbones import RepVGG +from mmcls.models.backbones.repvgg import RepVGGBlock +from mmcls.models.utils import SELayer + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def is_repvgg_block(modules): + if isinstance(modules, RepVGGBlock): + return True + return False + + +def test_repvgg_repvggblock(): + # Test RepVGGBlock with in_channels != out_channels, stride = 1 + block = RepVGGBlock(5, 10, stride=1) + block.eval() + x = torch.randn(1, 5, 16, 16) + x_out_not_deploy = block(x) + assert block.branch_norm is None + assert not hasattr(block, 'branch_reparam') + assert hasattr(block, 'branch_1x1') + assert hasattr(block, 'branch_3x3') + assert hasattr(block, 'branch_norm') + assert block.se_cfg is None + assert x_out_not_deploy.shape == torch.Size((1, 10, 16, 16)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 10, 16, 16)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with in_channels == out_channels, stride = 1 + block = RepVGGBlock(12, 12, stride=1) + block.eval() + x = torch.randn(1, 12, 8, 8) + x_out_not_deploy = block(x) + assert isinstance(block.branch_norm, nn.BatchNorm2d) + assert not hasattr(block, 'branch_reparam') + assert x_out_not_deploy.shape == torch.Size((1, 12, 8, 8)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 12, 8, 8)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with in_channels == out_channels, stride = 2 + block = RepVGGBlock(16, 16, stride=2) + block.eval() + x = torch.randn(1, 16, 8, 8) + x_out_not_deploy = block(x) + assert block.branch_norm is None + assert x_out_not_deploy.shape == torch.Size((1, 16, 4, 4)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 16, 4, 4)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with padding == dilation == 2 + block = RepVGGBlock(14, 14, stride=1, padding=2, dilation=2) + block.eval() + x = torch.randn(1, 14, 16, 16) + x_out_not_deploy = block(x) + assert isinstance(block.branch_norm, nn.BatchNorm2d) + assert x_out_not_deploy.shape == torch.Size((1, 14, 16, 16)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 14, 16, 16)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with groups = 2 + block = RepVGGBlock(4, 4, stride=1, groups=2) + block.eval() + x = torch.randn(1, 4, 5, 6) + x_out_not_deploy = block(x) + assert x_out_not_deploy.shape == torch.Size((1, 4, 5, 6)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 4, 5, 6)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with se + se_cfg = dict(ratio=4, divisor=1) + block = RepVGGBlock(18, 18, stride=1, se_cfg=se_cfg) + block.train() + x = torch.randn(1, 18, 5, 5) + x_out_not_deploy = 
block(x) + assert isinstance(block.se_layer, SELayer) + assert x_out_not_deploy.shape == torch.Size((1, 18, 5, 5)) + + # Test RepVGGBlock with checkpoint forward + block = RepVGGBlock(24, 24, stride=1, with_cp=True) + assert block.with_cp + x = torch.randn(1, 24, 7, 7) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 7, 7)) + + # Test RepVGGBlock with deploy == True + block = RepVGGBlock(8, 8, stride=1, deploy=True) + assert isinstance(block.branch_reparam, nn.Conv2d) + assert not hasattr(block, 'branch_3x3') + assert not hasattr(block, 'branch_1x1') + assert not hasattr(block, 'branch_norm') + x = torch.randn(1, 8, 16, 16) + x_out = block(x) + assert x_out.shape == torch.Size((1, 8, 16, 16)) + + +def test_repvgg_backbone(): + with pytest.raises(TypeError): + # arch must be str or dict + RepVGG(arch=[4, 6, 16, 1]) + + with pytest.raises(AssertionError): + # arch must in arch_settings + RepVGG(arch='A3') + + with pytest.raises(KeyError): + # arch must have num_blocks and width_factor + arch = dict(num_blocks=[2, 4, 14, 1]) + RepVGG(arch=arch) + + # len(arch['num_blocks']) == len(arch['width_factor']) + # == len(strides) == len(dilations) + with pytest.raises(AssertionError): + arch = dict(num_blocks=[2, 4, 14, 1], width_factor=[0.75, 0.75, 0.75]) + RepVGG(arch=arch) + + # len(strides) must equal to 4 + with pytest.raises(AssertionError): + RepVGG('A0', strides=(1, 1, 1)) + + # len(dilations) must equal to 4 + with pytest.raises(AssertionError): + RepVGG('A0', strides=(1, 1, 1, 1), dilations=(1, 1, 2)) + + # max(out_indices) < len(arch['num_blocks']) + with pytest.raises(AssertionError): + RepVGG('A0', out_indices=(5, )) + + # max(arch['group_idx'].keys()) <= sum(arch['num_blocks']) + with pytest.raises(AssertionError): + arch = dict( + num_blocks=[2, 4, 14, 1], + width_factor=[0.75, 0.75, 0.75], + group_idx={22: 2}) + RepVGG(arch=arch) + + # Test RepVGG norm state + model = RepVGG('A0') + model.train() + assert check_norm_state(model.modules(), True) + + # Test RepVGG with first stage frozen + frozen_stages = 1 + model = RepVGG('A0', frozen_stages=frozen_stages) + model.train() + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(0, frozen_stages): + stage_name = model.stages[i] + stage = model.__getattr__(stage_name) + for mod in stage: + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in stage.parameters(): + assert param.requires_grad is False + + # Test RepVGG with norm_eval + model = RepVGG('A0', norm_eval=True) + model.train() + assert check_norm_state(model.modules(), False) + + # Test RepVGG forward with layer 3 forward + model = RepVGG('A0', out_indices=(3, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 1280, 7, 7)) + + # Test RepVGG forward + model_test_settings = [ + dict(model_name='A0', out_sizes=(48, 96, 192, 1280)), + dict(model_name='A1', out_sizes=(64, 128, 256, 1280)), + dict(model_name='A2', out_sizes=(96, 192, 384, 1408)), + dict(model_name='B0', out_sizes=(64, 128, 256, 1280)), + dict(model_name='B1', out_sizes=(128, 256, 512, 2048)), + dict(model_name='B1g2', out_sizes=(128, 256, 512, 2048)), + dict(model_name='B1g4', out_sizes=(128, 256, 512, 2048)), + dict(model_name='B2', out_sizes=(160, 320, 640, 2560)), + 
dict(model_name='B2g2', out_sizes=(160, 320, 640, 2560)), + dict(model_name='B2g4', out_sizes=(160, 320, 640, 2560)), + dict(model_name='B3', out_sizes=(192, 384, 768, 2560)), + dict(model_name='B3g2', out_sizes=(192, 384, 768, 2560)), + dict(model_name='B3g4', out_sizes=(192, 384, 768, 2560)), + dict(model_name='D2se', out_sizes=(160, 320, 640, 2560)) + ] + + choose_models = ['A0', 'B1', 'B1g2', 'D2se'] + # Test RepVGG model forward + for model_test_setting in model_test_settings: + if model_test_setting['model_name'] not in choose_models: + continue + model = RepVGG( + model_test_setting['model_name'], out_indices=(0, 1, 2, 3)) + model.init_weights() + + # Test Norm + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + model.train() + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[0].shape == torch.Size( + (1, model_test_setting['out_sizes'][0], 56, 56)) + assert feat[1].shape == torch.Size( + (1, model_test_setting['out_sizes'][1], 28, 28)) + assert feat[2].shape == torch.Size( + (1, model_test_setting['out_sizes'][2], 14, 14)) + assert feat[3].shape == torch.Size( + (1, model_test_setting['out_sizes'][3], 7, 7)) + + # Test eval of "train" mode and "deploy" mode + gap = nn.AdaptiveAvgPool2d(output_size=(1)) + fc = nn.Linear(model_test_setting['out_sizes'][3], 10) + model.eval() + feat = model(imgs) + pred = fc(gap(feat[3]).flatten(1)) + model.switch_to_deploy() + for m in model.modules(): + if isinstance(m, RepVGGBlock): + assert m.deploy is True + feat_deploy = model(imgs) + pred_deploy = fc(gap(feat_deploy[3]).flatten(1)) + for i in range(4): + torch.allclose(feat[i], feat_deploy[i]) + torch.allclose(pred, pred_deploy) + + +def test_repvgg_load(): + # Test output before and load from deploy checkpoint + model = RepVGG('A1', out_indices=(0, 1, 2, 3)) + inputs = torch.randn((1, 3, 224, 224)) + ckpt_path = os.path.join(tempfile.gettempdir(), 'ckpt.pth') + model.switch_to_deploy() + model.eval() + outputs = model(inputs) + + model_deploy = RepVGG('A1', out_indices=(0, 1, 2, 3), deploy=True) + save_checkpoint(model, ckpt_path) + load_checkpoint(model_deploy, ckpt_path, strict=True) + + outputs_load = model_deploy(inputs) + for feat, feat_load in zip(outputs, outputs_load): + assert torch.allclose(feat, feat_load) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_res2net.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_res2net.py new file mode 100644 index 0000000000..173d3e628e --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_res2net.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
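The new file that follows tests the Res2Net backbone (only depths 50, 101 and 152 are supported). A brief sketch of the constructor options the tests cover, with the stage shapes taken from the assertions below:

import torch
from mmcls.models.backbones import Res2Net

model = Res2Net(depth=50, out_indices=(0, 1, 2, 3), deep_stem=True, avg_down=True)
model.init_weights()
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
# Four feature maps with 256, 512, 1024 and 2048 channels at 56, 28, 14 and 7 pixels.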
+import pytest +import torch +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import Res2Net + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_resnet_cifar(): + # Only support depth 50, 101 and 152 + with pytest.raises(KeyError): + Res2Net(depth=18) + + # test the feature map size when depth is 50 + # and deep_stem=True, avg_down=True + model = Res2Net( + depth=50, out_indices=(0, 1, 2, 3), deep_stem=True, avg_down=True) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model.stem(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # test the feature map size when depth is 101 + # and deep_stem=False, avg_down=False + model = Res2Net( + depth=101, out_indices=(0, 1, 2, 3), deep_stem=False, avg_down=False) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model.conv1(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test Res2Net with first stage frozen + frozen_stages = 1 + model = Res2Net(depth=50, frozen_stages=frozen_stages, deep_stem=False) + model.init_weights() + model.train() + assert check_norm_state([model.norm1], False) + for param in model.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnest.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnest.py new file mode 100644 index 0000000000..7a0b250ddb --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnest.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
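The new file that follows tests the ResNeSt backbone and its split-attention Bottleneck. A sketch of the constructor used in those tests (radix and reduction_factor configure the split-attention blocks; channel counts come from the assertions below):

import torch
from mmcls.models.backbones import ResNeSt

model = ResNeSt(depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3))
model.init_weights()
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
# Stage outputs carry 256, 512, 1024 and 2048 channels, ending in a 7x7 map.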
+import pytest +import torch + +from mmcls.models.backbones import ResNeSt +from mmcls.models.backbones.resnest import Bottleneck as BottleneckS + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow') + + # Test ResNeSt Bottleneck structure + block = BottleneckS( + 64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch') + assert block.avd_layer.stride == 2 + assert block.conv2.channels == 64 + + # Test ResNeSt Bottleneck forward + block = BottleneckS(64, 64, radix=2, reduction_factor=4) + x = torch.randn(2, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([2, 64, 56, 56]) + + +def test_resnest(): + with pytest.raises(KeyError): + # ResNeSt depth should be in [50, 101, 152, 200] + ResNeSt(depth=18) + + # Test ResNeSt with radix 2, reduction_factor 4 + model = ResNeSt( + depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([2, 256, 56, 56]) + assert feat[1].shape == torch.Size([2, 512, 28, 28]) + assert feat[2].shape == torch.Size([2, 1024, 14, 14]) + assert feat[3].shape == torch.Size([2, 2048, 7, 7]) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet.py new file mode 100644 index 0000000000..a6bdbd613b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet.py @@ -0,0 +1,566 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
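The new file that follows holds the ResNet backbone tests (BasicBlock, Bottleneck, ResLayer and the full networks). As a rough usage sketch of the backbone interface they exercise, assuming out_indices=(3, ) selects only the final stage and recalling that ResNet-50 ends in 2048 channels (frozen_stages and norm_eval are options the tests check explicitly):

import torch
from mmcls.models.backbones import ResNet

# Freeze the stem plus the first stage and keep BN layers in eval mode while training.
model = ResNet(depth=50, out_indices=(3, ), frozen_stages=1, norm_eval=True)
model.init_weights()
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
# The last stage of ResNet-50 yields a (1, 2048, 7, 7) feature map.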
+import pytest +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import ResNet, ResNetV1d +from mmcls.models.backbones.resnet import (BasicBlock, Bottleneck, ResLayer, + get_expansion) + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (BasicBlock, Bottleneck)): + return True + return False + + +def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_get_expansion(): + assert get_expansion(Bottleneck, 2) == 2 + assert get_expansion(BasicBlock) == 1 + assert get_expansion(Bottleneck) == 4 + + class MyResBlock(nn.Module): + + expansion = 8 + + assert get_expansion(MyResBlock) == 8 + + # expansion must be an integer or None + with pytest.raises(TypeError): + get_expansion(Bottleneck, '0') + + # expansion is not specified and cannot be inferred + with pytest.raises(TypeError): + + class SomeModule(nn.Module): + pass + + get_expansion(SomeModule) + + +def test_basic_block(): + # expansion must be 1 + with pytest.raises(AssertionError): + BasicBlock(64, 64, expansion=2) + + # BasicBlock with stride 1, out_channels == in_channels + block = BasicBlock(64, 64) + assert block.in_channels == 64 + assert block.mid_channels == 64 + assert block.out_channels == 64 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 64 + assert block.conv1.kernel_size == (3, 3) + assert block.conv1.stride == (1, 1) + assert block.conv2.in_channels == 64 + assert block.conv2.out_channels == 64 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # BasicBlock with stride 1 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, bias=False), nn.BatchNorm2d(128)) + block = BasicBlock(64, 128, downsample=downsample) + assert block.in_channels == 64 + assert block.mid_channels == 128 + assert block.out_channels == 128 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 128 + assert block.conv1.kernel_size == (3, 3) + assert block.conv1.stride == (1, 1) + assert block.conv2.in_channels == 128 + assert block.conv2.out_channels == 128 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 128, 56, 56]) + + # BasicBlock with stride 2 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False), + nn.BatchNorm2d(128)) + block = BasicBlock(64, 128, stride=2, downsample=downsample) + assert block.in_channels == 64 + assert block.mid_channels == 128 + assert block.out_channels == 128 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 128 + assert block.conv1.kernel_size == (3, 3) + assert block.conv1.stride == (2, 2) + assert block.conv2.in_channels == 128 + assert block.conv2.out_channels == 128 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 64, 
56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 128, 28, 28]) + + # forward with checkpointing + block = BasicBlock(64, 64, with_cp=True) + assert block.with_cp + x = torch.randn(1, 64, 56, 56, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_bottleneck(): + # style must be in ['pytorch', 'caffe'] + with pytest.raises(AssertionError): + Bottleneck(64, 64, style='tensorflow') + + # expansion must be divisible by out_channels + with pytest.raises(AssertionError): + Bottleneck(64, 64, expansion=3) + + # Test Bottleneck style + block = Bottleneck(64, 64, stride=2, style='pytorch') + assert block.conv1.stride == (1, 1) + assert block.conv2.stride == (2, 2) + block = Bottleneck(64, 64, stride=2, style='caffe') + assert block.conv1.stride == (2, 2) + assert block.conv2.stride == (1, 1) + + # Bottleneck with stride 1 + block = Bottleneck(64, 64, style='pytorch') + assert block.in_channels == 64 + assert block.mid_channels == 16 + assert block.out_channels == 64 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 16 + assert block.conv1.kernel_size == (1, 1) + assert block.conv2.in_channels == 16 + assert block.conv2.out_channels == 16 + assert block.conv2.kernel_size == (3, 3) + assert block.conv3.in_channels == 16 + assert block.conv3.out_channels == 64 + assert block.conv3.kernel_size == (1, 1) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 64, 56, 56) + + # Bottleneck with stride 1 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1), nn.BatchNorm2d(128)) + block = Bottleneck(64, 128, style='pytorch', downsample=downsample) + assert block.in_channels == 64 + assert block.mid_channels == 32 + assert block.out_channels == 128 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 32 + assert block.conv1.kernel_size == (1, 1) + assert block.conv2.in_channels == 32 + assert block.conv2.out_channels == 32 + assert block.conv2.kernel_size == (3, 3) + assert block.conv3.in_channels == 32 + assert block.conv3.out_channels == 128 + assert block.conv3.kernel_size == (1, 1) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 128, 56, 56) + + # Bottleneck with stride 2 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, stride=2), nn.BatchNorm2d(128)) + block = Bottleneck( + 64, 128, stride=2, style='pytorch', downsample=downsample) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 128, 28, 28) + + # Bottleneck with expansion 2 + block = Bottleneck(64, 64, style='pytorch', expansion=2) + assert block.in_channels == 64 + assert block.mid_channels == 32 + assert block.out_channels == 64 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 32 + assert block.conv1.kernel_size == (1, 1) + assert block.conv2.in_channels == 32 + assert block.conv2.out_channels == 32 + assert block.conv2.kernel_size == (3, 3) + assert block.conv3.in_channels == 32 + assert block.conv3.out_channels == 64 + assert block.conv3.kernel_size == (1, 1) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 64, 56, 56) + + # Test Bottleneck with checkpointing + block = Bottleneck(64, 64, with_cp=True) + block.train() + assert block.with_cp + x = torch.randn(1, 64, 56, 56, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_basicblock_reslayer(): + # 3 BasicBlock w/o downsample 
+ layer = ResLayer(BasicBlock, 3, 32, 32) + assert len(layer) == 3 + for i in range(3): + assert layer[i].in_channels == 32 + assert layer[i].out_channels == 32 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 32, 56, 56) + + # 3 BasicBlock w/ stride 1 and downsample + layer = ResLayer(BasicBlock, 3, 32, 64) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (1, 1) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 56, 56) + + # 3 BasicBlock w/ stride 2 and downsample + layer = ResLayer(BasicBlock, 3, 32, 64, stride=2) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (2, 2) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + # 3 BasicBlock w/ stride 2 and downsample with avg pool + layer = ResLayer(BasicBlock, 3, 32, 64, stride=2, avg_down=True) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].downsample is not None and len(layer[0].downsample) == 3 + assert isinstance(layer[0].downsample[0], nn.AvgPool2d) + assert layer[0].downsample[0].stride == 2 + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + +def test_bottleneck_reslayer(): + # 3 Bottleneck w/o downsample + layer = ResLayer(Bottleneck, 3, 32, 32) + assert len(layer) == 3 + for i in range(3): + assert layer[i].in_channels == 32 + assert layer[i].out_channels == 32 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 32, 56, 56) + + # 3 Bottleneck w/ stride 1 and downsample + layer = ResLayer(Bottleneck, 3, 32, 64) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 1 + assert layer[0].conv1.out_channels == 16 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (1, 1) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].conv1.out_channels == 16 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 56, 56) + + # 3 Bottleneck w/ stride 2 and downsample + layer = ResLayer(Bottleneck, 3, 32, 64, stride=2) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + 
assert layer[0].conv1.out_channels == 16 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (2, 2) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].conv1.out_channels == 16 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + # 3 Bottleneck w/ stride 2 and downsample with avg pool + layer = ResLayer(Bottleneck, 3, 32, 64, stride=2, avg_down=True) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].conv1.out_channels == 16 + assert layer[0].downsample is not None and len(layer[0].downsample) == 3 + assert isinstance(layer[0].downsample[0], nn.AvgPool2d) + assert layer[0].downsample[0].stride == 2 + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].conv1.out_channels == 16 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + # 3 Bottleneck with custom expansion + layer = ResLayer(Bottleneck, 3, 32, 32, expansion=2) + assert len(layer) == 3 + for i in range(3): + assert layer[i].in_channels == 32 + assert layer[i].out_channels == 32 + assert layer[i].stride == 1 + assert layer[i].conv1.out_channels == 16 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 32, 56, 56) + + +def test_resnet(): + """Test resnet backbone.""" + with pytest.raises(KeyError): + # ResNet depth should be in [18, 34, 50, 101, 152] + ResNet(20) + + with pytest.raises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=0) + + with pytest.raises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=5) + + with pytest.raises(AssertionError): + # len(strides) == len(dilations) == num_stages + ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = ResNet(50) + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + ResNet(50, style='tensorflow') + + # Test ResNet50 norm_eval=True + model = ResNet(50, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test ResNet50 with torchvision pretrained weight + model = ResNet( + depth=50, + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test ResNet50 with first stage frozen + frozen_stages = 1 + model = ResNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + assert model.norm1.training is False + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test ResNet18 forward + model = ResNet(18, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() 
+ + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 64, 56, 56) + assert feat[1].shape == (1, 128, 28, 28) + assert feat[2].shape == (1, 256, 14, 14) + assert feat[3].shape == (1, 512, 7, 7) + + # Test ResNet50 with BatchNorm forward + model = ResNet(50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50 with layers 1, 2, 3 out forward + model = ResNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + + # Test ResNet50 with layers 3 (top feature maps) out forward + model = ResNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == (1, 2048, 7, 7) + + # Test ResNet50 with checkpoint forward + model = ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # zero initialization of residual blocks + model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + assert all_zeros(m.norm3) + elif isinstance(m, BasicBlock): + assert all_zeros(m.norm2) + + # non-zero initialization of residual blocks + model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=False) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + assert not all_zeros(m.norm3) + elif isinstance(m, BasicBlock): + assert not all_zeros(m.norm2) + + +def test_resnet_v1d(): + model = ResNetV1d(depth=50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + + imgs = torch.randn(1, 3, 224, 224) + feat = model.stem(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50V1d with first stage frozen + frozen_stages = 1 + model = ResNetV1d(depth=50, frozen_stages=frozen_stages) + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + model.init_weights() + model.train() + check_norm_state(model.stem, False) + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + +def test_resnet_half_channel(): + model = ResNet(50, 
base_channels=32, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 128, 56, 56) + assert feat[1].shape == (1, 256, 28, 28) + assert feat[2].shape == (1, 512, 14, 14) + assert feat[3].shape == (1, 1024, 7, 7) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet_cifar.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet_cifar.py new file mode 100644 index 0000000000..af7bba61ec --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnet_cifar.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import ResNet_CIFAR + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_resnet_cifar(): + # deep_stem must be False + with pytest.raises(AssertionError): + ResNet_CIFAR(depth=18, deep_stem=True) + + # test the feature map size when depth is 18 + model = ResNet_CIFAR(depth=18, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 32, 32) + feat = model.conv1(imgs) + assert feat.shape == (1, 64, 32, 32) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 64, 32, 32) + assert feat[1].shape == (1, 128, 16, 16) + assert feat[2].shape == (1, 256, 8, 8) + assert feat[3].shape == (1, 512, 4, 4) + + # test the feature map size when depth is 50 + model = ResNet_CIFAR(depth=50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 32, 32) + feat = model.conv1(imgs) + assert feat.shape == (1, 64, 32, 32) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 32, 32) + assert feat[1].shape == (1, 512, 16, 16) + assert feat[2].shape == (1, 1024, 8, 8) + assert feat[3].shape == (1, 2048, 4, 4) + + # Test ResNet_CIFAR with first stage frozen + frozen_stages = 1 + model = ResNet_CIFAR(depth=50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + check_norm_state([model.norm1], False) + for param in model.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnext.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnext.py new file mode 100644 index 0000000000..4ee15f9330 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_resnext.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmcls.models.backbones import ResNeXt +from mmcls.models.backbones.resnext import Bottleneck as BottleneckX + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckX(64, 64, groups=32, width_per_group=4, style='tensorflow') + + # Test ResNeXt Bottleneck structure + block = BottleneckX( + 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 32 + assert block.conv2.out_channels == 128 + + # Test ResNeXt Bottleneck forward + block = BottleneckX(64, 64, base_channels=16, groups=32, width_per_group=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_resnext(): + with pytest.raises(KeyError): + # ResNeXt depth should be in [50, 101, 152] + ResNeXt(depth=18) + + # Test ResNeXt with group 32, width_per_group 4 + model = ResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) + for m in model.modules(): + if isinstance(m, BottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test ResNeXt with group 32, width_per_group 4 and layers 3 out forward + model = ResNeXt(depth=50, groups=32, width_per_group=4, out_indices=(3, )) + for m in model.modules(): + if isinstance(m, BottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnet.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnet.py new file mode 100644 index 0000000000..32670209cf --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnet.py @@ -0,0 +1,247 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest
+import torch
+from torch.nn.modules import AvgPool2d
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmcls.models.backbones import SEResNet
+from mmcls.models.backbones.resnet import ResLayer
+from mmcls.models.backbones.seresnet import SEBottleneck, SELayer
+
+
+def all_zeros(modules):
+    """Check if the weight (and bias) is all zero."""
+    weight_zero = torch.equal(modules.weight.data,
+                              torch.zeros_like(modules.weight.data))
+    if hasattr(modules, 'bias'):
+        bias_zero = torch.equal(modules.bias.data,
+                                torch.zeros_like(modules.bias.data))
+    else:
+        bias_zero = True
+
+    return weight_zero and bias_zero
+
+
+def check_norm_state(modules, train_state):
+    """Check if norm layer is in correct train state."""
+    for mod in modules:
+        if isinstance(mod, _BatchNorm):
+            if mod.training != train_state:
+                return False
+    return True
+
+
+def test_selayer():
+    # Test selayer forward
+    layer = SELayer(64)
+    x = torch.randn(1, 64, 56, 56)
+    x_out = layer(x)
+    assert x_out.shape == torch.Size([1, 64, 56, 56])
+
+    # Test selayer forward with different ratio
+    layer = SELayer(64, ratio=8)
+    x = torch.randn(1, 64, 56, 56)
+    x_out = layer(x)
+    assert x_out.shape == torch.Size([1, 64, 56, 56])
+
+
+def test_bottleneck():
+
+    with pytest.raises(AssertionError):
+        # Style must be in ['pytorch', 'caffe']
+        SEBottleneck(64, 64, style='tensorflow')
+
+    # Test SEBottleneck with checkpoint forward
+    block = SEBottleneck(64, 64, with_cp=True)
+    assert block.with_cp
+    x = torch.randn(1, 64, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size([1, 64, 56, 56])
+
+    # Test Bottleneck style
+    block = SEBottleneck(64, 256, stride=2, style='pytorch')
+    assert block.conv1.stride == (1, 1)
+    assert block.conv2.stride == (2, 2)
+    block = SEBottleneck(64, 256, stride=2, style='caffe')
+    assert block.conv1.stride == (2, 2)
+    assert block.conv2.stride == (1, 1)
+
+    # Test Bottleneck forward
+    block = SEBottleneck(64, 64)
+    x = torch.randn(1, 64, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size([1, 64, 56, 56])
+
+
+def test_res_layer():
+    # Test ResLayer of 3 SEBottleneck w/o downsample
+    layer = ResLayer(SEBottleneck, 3, 64, 64, se_ratio=16)
+    assert len(layer) == 3
+    assert layer[0].conv1.in_channels == 64
+    assert layer[0].conv1.out_channels == 16
+    for i in range(1, len(layer)):
+        assert layer[i].conv1.in_channels == 64
+        assert layer[i].conv1.out_channels == 16
+    for i in range(len(layer)):
+        assert layer[i].downsample is None
+    x = torch.randn(1, 64, 56, 56)
+    x_out = layer(x)
+    assert x_out.shape == torch.Size([1, 64, 56, 56])
+
+    # Test ResLayer of 3 SEBottleneck with downsample
+    layer = ResLayer(SEBottleneck, 3, 64, 256, se_ratio=16)
+    assert layer[0].downsample[0].out_channels == 256
+    for i in range(1, len(layer)):
+        assert layer[i].downsample is None
+    x = torch.randn(1, 64, 56, 56)
+    x_out = layer(x)
+    assert x_out.shape == torch.Size([1, 256, 56, 56])
+
+    # Test ResLayer of 3 SEBottleneck with stride=2
+    layer = ResLayer(SEBottleneck, 3, 64, 256, stride=2, se_ratio=8)
+    assert layer[0].downsample[0].out_channels == 256
+    assert layer[0].downsample[0].stride == (2, 2)
+    for i in range(1, len(layer)):
+        assert layer[i].downsample is None
+    x = torch.randn(1, 64, 56, 56)
+    x_out = layer(x)
+    assert x_out.shape == torch.Size([1, 256, 28, 28])
+
+    # Test ResLayer of 3 SEBottleneck with stride=2 and average downsample
+    layer = ResLayer(
+        SEBottleneck, 3, 64, 256, stride=2, avg_down=True, se_ratio=8)
+    assert isinstance(layer[0].downsample[0], AvgPool2d)
+    assert
layer[0].downsample[1].out_channels == 256 + assert layer[0].downsample[1].stride == (1, 1) + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 28, 28]) + + +def test_seresnet(): + """Test resnet backbone.""" + with pytest.raises(KeyError): + # SEResNet depth should be in [50, 101, 152] + SEResNet(20) + + with pytest.raises(AssertionError): + # In SEResNet: 1 <= num_stages <= 4 + SEResNet(50, num_stages=0) + + with pytest.raises(AssertionError): + # In SEResNet: 1 <= num_stages <= 4 + SEResNet(50, num_stages=5) + + with pytest.raises(AssertionError): + # len(strides) == len(dilations) == num_stages + SEResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = SEResNet(50) + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEResNet(50, style='tensorflow') + + # Test SEResNet50 norm_eval=True + model = SEResNet(50, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test SEResNet50 with torchvision pretrained weight + model = SEResNet( + depth=50, + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test SEResNet50 with first stage frozen + frozen_stages = 1 + model = SEResNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + assert model.norm1.training is False + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test SEResNet50 with BatchNorm forward + model = SEResNet(50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNet50 with layers 1, 2, 3 out forward + model = SEResNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + + # Test SEResNet50 with layers 3 (top feature maps) out forward + model = SEResNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNet50 with checkpoint forward + model = SEResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if isinstance(m, SEBottleneck): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 
28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNet50 zero initialization of residual + model = SEResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, SEBottleneck): + assert all_zeros(m.norm3) + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnext.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnext.py new file mode 100644 index 0000000000..2431c0708d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_seresnext.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmcls.models.backbones import SEResNeXt +from mmcls.models.backbones.seresnext import SEBottleneck as SEBottleneckX + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEBottleneckX(64, 64, groups=32, width_per_group=4, style='tensorflow') + + # Test SEResNeXt Bottleneck structure + block = SEBottleneckX( + 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') + assert block.width_per_group == 4 + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 32 + assert block.conv2.out_channels == 128 + assert block.conv2.out_channels == block.mid_channels + + # Test SEResNeXt Bottleneck structure (groups=1) + block = SEBottleneckX( + 64, 256, groups=1, width_per_group=4, stride=2, style='pytorch') + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 1 + assert block.conv2.out_channels == 64 + assert block.mid_channels == 64 + assert block.conv2.out_channels == block.mid_channels + + # Test SEResNeXt Bottleneck forward + block = SEBottleneckX( + 64, 64, base_channels=16, groups=32, width_per_group=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_seresnext(): + with pytest.raises(KeyError): + # SEResNeXt depth should be in [50, 101, 152] + SEResNeXt(depth=18) + + # Test SEResNeXt with group 32, width_per_group 4 + model = SEResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) + for m in model.modules(): + if isinstance(m, SEBottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNeXt with group 32, width_per_group 4 and layers 3 out forward + model = SEResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(3, )) + for m in model.modules(): + if isinstance(m, SEBottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) diff --git 
a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v1.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v1.py
new file mode 100644
index 0000000000..97beee7abb
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v1.py
@@ -0,0 +1,246 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pytest
+import torch
+from torch.nn.modules import GroupNorm
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmcls.models.backbones import ShuffleNetV1
+from mmcls.models.backbones.shufflenet_v1 import ShuffleUnit
+
+
+def is_block(modules):
+    """Check if is a ShuffleNet building block."""
+    if isinstance(modules, (ShuffleUnit, )):
+        return True
+    return False
+
+
+def is_norm(modules):
+    """Check if is one of the norms."""
+    if isinstance(modules, (GroupNorm, _BatchNorm)):
+        return True
+    return False
+
+
+def check_norm_state(modules, train_state):
+    """Check if norm layer is in correct train state."""
+    for mod in modules:
+        if isinstance(mod, _BatchNorm):
+            if mod.training != train_state:
+                return False
+    return True
+
+
+def test_shufflenetv1_shuffleunit():
+
+    with pytest.raises(ValueError):
+        # combine must be in ['add', 'concat']
+        ShuffleUnit(24, 16, groups=3, first_block=True, combine='test')
+
+    with pytest.raises(AssertionError):
+        # in_channels must be equal to out_channels when combine='add'
+        ShuffleUnit(64, 24, groups=4, first_block=True, combine='add')
+
+    # Test ShuffleUnit with combine='add'
+    block = ShuffleUnit(24, 24, groups=3, first_block=True, combine='add')
+    x = torch.randn(1, 24, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size((1, 24, 56, 56))
+
+    # Test ShuffleUnit with combine='concat'
+    block = ShuffleUnit(24, 240, groups=3, first_block=True, combine='concat')
+    x = torch.randn(1, 24, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size((1, 240, 28, 28))
+
+    # Test ShuffleUnit with checkpoint forward
+    block = ShuffleUnit(
+        24, 24, groups=3, first_block=True, combine='add', with_cp=True)
+    assert block.with_cp
+    x = torch.randn(1, 24, 56, 56)
+    x.requires_grad = True
+    x_out = block(x)
+    assert x_out.shape == torch.Size((1, 24, 56, 56))
+
+
+def test_shufflenetv1_backbone():
+
+    with pytest.raises(ValueError):
+        # frozen_stages must be in range(-1, 4)
+        ShuffleNetV1(frozen_stages=10)
+
+    with pytest.raises(ValueError):
+        # the item in out_indices must be in range(0, 4)
+        ShuffleNetV1(out_indices=[5])
+
+    with pytest.raises(ValueError):
+        # groups must be in [1, 2, 3, 4, 8]
+        ShuffleNetV1(groups=10)
+
+    with pytest.raises(TypeError):
+        # pretrained must be str or None
+        model = ShuffleNetV1()
+        model.init_weights(pretrained=1)
+
+    # Test ShuffleNetV1 norm state
+    model = ShuffleNetV1()
+    model.init_weights()
+    model.train()
+    assert check_norm_state(model.modules(), True)
+
+    # Test ShuffleNetV1 with first stage frozen
+    frozen_stages = 1
+    model = ShuffleNetV1(frozen_stages=frozen_stages, out_indices=(0, 1, 2))
+    model.init_weights()
+    model.train()
+    for param in model.conv1.parameters():
+        assert param.requires_grad is False
+    for i in range(frozen_stages):
+        layer = model.layers[i]
+        for mod in layer.modules():
+            if isinstance(mod, _BatchNorm):
+                assert mod.training is False
+        for param in layer.parameters():
+            assert param.requires_grad is False
+
+    # Test ShuffleNetV1 forward with groups=1
+    model = ShuffleNetV1(groups=1, out_indices=(0, 1, 2))
model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 144, 28, 28)) + assert feat[1].shape == torch.Size((1, 288, 14, 14)) + assert feat[2].shape == torch.Size((1, 576, 7, 7)) + + # Test ShuffleNetV1 forward with groups=2 + model = ShuffleNetV1(groups=2, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 200, 28, 28)) + assert feat[1].shape == torch.Size((1, 400, 14, 14)) + assert feat[2].shape == torch.Size((1, 800, 7, 7)) + + # Test ShuffleNetV1 forward with groups=3 + model = ShuffleNetV1(groups=3, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 240, 28, 28)) + assert feat[1].shape == torch.Size((1, 480, 14, 14)) + assert feat[2].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with groups=4 + model = ShuffleNetV1(groups=4, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 272, 28, 28)) + assert feat[1].shape == torch.Size((1, 544, 14, 14)) + assert feat[2].shape == torch.Size((1, 1088, 7, 7)) + + # Test ShuffleNetV1 forward with groups=8 + model = ShuffleNetV1(groups=8, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 384, 28, 28)) + assert feat[1].shape == torch.Size((1, 768, 14, 14)) + assert feat[2].shape == torch.Size((1, 1536, 7, 7)) + + # Test ShuffleNetV1 forward with GroupNorm forward + model = ShuffleNetV1( + groups=3, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 240, 28, 28)) + assert feat[1].shape == torch.Size((1, 480, 14, 14)) + assert feat[2].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with layers 1, 2 forward + model = ShuffleNetV1(groups=3, out_indices=(1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 2 + assert feat[0].shape == torch.Size((1, 480, 14, 14)) + assert feat[1].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with layers 2 forward + model = ShuffleNetV1(groups=3, out_indices=(2, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert 
isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with checkpoint forward + model = ShuffleNetV1(groups=3, with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp + + # Test ShuffleNetV1 with norm_eval + model = ShuffleNetV1(norm_eval=True) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), False) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v2.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v2.py new file mode 100644 index 0000000000..b7ab495552 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_shufflenet_v2.py @@ -0,0 +1,205 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import ShuffleNetV2 +from mmcls.models.backbones.shufflenet_v2 import InvertedResidual + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (InvertedResidual, )): + return True + return False + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_shufflenetv2_invertedresidual(): + + with pytest.raises(AssertionError): + # when stride==1, in_channels should be equal to out_channels // 2 * 2 + InvertedResidual(24, 32, stride=1) + + with pytest.raises(AssertionError): + # when in_channels != out_channels // 2 * 2, stride should not be + # equal to 1. 
+        InvertedResidual(24, 32, stride=1)
+
+    # Test InvertedResidual forward
+    block = InvertedResidual(24, 48, stride=2)
+    x = torch.randn(1, 24, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size((1, 48, 28, 28))
+
+    # Test InvertedResidual with checkpoint forward
+    block = InvertedResidual(48, 48, stride=1, with_cp=True)
+    assert block.with_cp
+    x = torch.randn(1, 48, 56, 56)
+    x.requires_grad = True
+    x_out = block(x)
+    assert x_out.shape == torch.Size((1, 48, 56, 56))
+
+
+def test_shufflenetv2_backbone():
+
+    with pytest.raises(ValueError):
+        # widen_factor must be in [0.5, 1.0, 1.5, 2.0]
+        ShuffleNetV2(widen_factor=3.0)
+
+    with pytest.raises(ValueError):
+        # frozen_stages must be in [0, 1, 2, 3]
+        ShuffleNetV2(widen_factor=1.0, frozen_stages=4)
+
+    with pytest.raises(ValueError):
+        # out_indices must be in [0, 1, 2, 3]
+        ShuffleNetV2(widen_factor=1.0, out_indices=(4, ))
+
+    with pytest.raises(TypeError):
+        # pretrained must be str or None
+        model = ShuffleNetV2()
+        model.init_weights(pretrained=1)
+
+    # Test ShuffleNetV2 norm state
+    model = ShuffleNetV2()
+    model.init_weights()
+    model.train()
+    assert check_norm_state(model.modules(), True)
+
+    # Test ShuffleNetV2 with first stage frozen
+    frozen_stages = 1
+    model = ShuffleNetV2(frozen_stages=frozen_stages)
+    model.init_weights()
+    model.train()
+    for param in model.conv1.parameters():
+        assert param.requires_grad is False
+    for i in range(0, frozen_stages):
+        layer = model.layers[i]
+        for mod in layer.modules():
+            if isinstance(mod, _BatchNorm):
+                assert mod.training is False
+        for param in layer.parameters():
+            assert param.requires_grad is False
+
+    # Test ShuffleNetV2 with norm_eval
+    model = ShuffleNetV2(norm_eval=True)
+    model.init_weights()
+    model.train()
+
+    assert check_norm_state(model.modules(), False)
+
+    # Test ShuffleNetV2 forward with widen_factor=0.5
+    model = ShuffleNetV2(widen_factor=0.5, out_indices=(0, 1, 2, 3))
+    model.init_weights()
+    model.train()
+
+    for m in model.modules():
+        if is_norm(m):
+            assert isinstance(m, _BatchNorm)
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 4
+    assert feat[0].shape == torch.Size((1, 48, 28, 28))
+    assert feat[1].shape == torch.Size((1, 96, 14, 14))
+    assert feat[2].shape == torch.Size((1, 192, 7, 7))
+
+    # Test ShuffleNetV2 forward with widen_factor=1.0
+    model = ShuffleNetV2(widen_factor=1.0, out_indices=(0, 1, 2, 3))
+    model.init_weights()
+    model.train()
+
+    for m in model.modules():
+        if is_norm(m):
+            assert isinstance(m, _BatchNorm)
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 4
+    assert feat[0].shape == torch.Size((1, 116, 28, 28))
+    assert feat[1].shape == torch.Size((1, 232, 14, 14))
+    assert feat[2].shape == torch.Size((1, 464, 7, 7))
+
+    # Test ShuffleNetV2 forward with widen_factor=1.5
+    model = ShuffleNetV2(widen_factor=1.5, out_indices=(0, 1, 2, 3))
+    model.init_weights()
+    model.train()
+
+    for m in model.modules():
+        if is_norm(m):
+            assert isinstance(m, _BatchNorm)
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 4
+    assert feat[0].shape == torch.Size((1, 176, 28, 28))
+    assert feat[1].shape == torch.Size((1, 352, 14, 14))
+    assert feat[2].shape == torch.Size((1, 704, 7, 7))
+
+    # Test ShuffleNetV2 forward with widen_factor=2.0
+    model = ShuffleNetV2(widen_factor=2.0, out_indices=(0, 1, 2, 3))
+    model.init_weights()
+    model.train()
+
+    for m in model.modules():
+        if is_norm(m):
+            assert isinstance(m, _BatchNorm)
+
+    imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 244, 28, 28)) + assert feat[1].shape == torch.Size((1, 488, 14, 14)) + assert feat[2].shape == torch.Size((1, 976, 7, 7)) + + # Test ShuffleNetV2 forward with layers 3 forward + model = ShuffleNetV2(widen_factor=1.0, out_indices=(2, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 464, 7, 7)) + + # Test ShuffleNetV2 forward with layers 1 2 forward + model = ShuffleNetV2(widen_factor=1.0, out_indices=(1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 2 + assert feat[0].shape == torch.Size((1, 232, 14, 14)) + assert feat[1].shape == torch.Size((1, 464, 7, 7)) + + # Test ShuffleNetV2 forward with checkpoint forward + model = ShuffleNetV2(widen_factor=1.0, with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_swin_transformer.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_swin_transformer.py new file mode 100644 index 0000000000..4895e196f7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_swin_transformer.py @@ -0,0 +1,168 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import tempfile +from math import ceil + +import numpy as np +import pytest +import torch +from mmcv.runner import load_checkpoint, save_checkpoint + +from mmcls.models.backbones import SwinTransformer + + +def test_assertion(): + """Test Swin Transformer backbone.""" + with pytest.raises(AssertionError): + # Swin Transformer arch string should be in + SwinTransformer(arch='unknown') + + with pytest.raises(AssertionError): + # Swin Transformer arch dict should include 'embed_dims', + # 'depths' and 'num_head' keys. 
+ SwinTransformer(arch=dict(embed_dims=96, depths=[2, 2, 18, 2])) + + +def test_forward(): + # Test tiny arch forward + model = SwinTransformer(arch='Tiny') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + output = model(imgs) + assert len(output) == 1 + assert output[0].shape == (1, 768, 7, 7) + + # Test small arch forward + model = SwinTransformer(arch='small') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + output = model(imgs) + assert len(output) == 1 + assert output[0].shape == (1, 768, 7, 7) + + # Test base arch forward + model = SwinTransformer(arch='B') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + output = model(imgs) + assert len(output) == 1 + assert output[0].shape == (1, 1024, 7, 7) + + # Test large arch forward + model = SwinTransformer(arch='l') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + output = model(imgs) + assert len(output) == 1 + assert output[0].shape == (1, 1536, 7, 7) + + # Test base arch with window_size=12, image_size=384 + model = SwinTransformer( + arch='base', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 384, 384) + output = model(imgs) + assert len(output) == 1 + assert output[0].shape == (1, 1024, 12, 12) + + +def test_structure(): + # Test small with use_abs_pos_embed = True + model = SwinTransformer(arch='small', use_abs_pos_embed=True) + assert model.absolute_pos_embed.shape == (1, 3136, 96) + + # Test small with use_abs_pos_embed = False + model = SwinTransformer(arch='small', use_abs_pos_embed=False) + assert not hasattr(model, 'absolute_pos_embed') + + # Test small with auto_pad = True + model = SwinTransformer( + arch='small', + auto_pad=True, + stage_cfgs=dict( + block_cfgs={'window_size': 7}, + downsample_cfg={ + 'kernel_size': (3, 2), + })) + + # stage 1 + input_h = int(224 / 4 / 3) + expect_h = ceil(input_h / 7) * 7 + input_w = int(224 / 4 / 2) + expect_w = ceil(input_w / 7) * 7 + assert model.stages[1].blocks[0].attn.pad_b == expect_h - input_h + assert model.stages[1].blocks[0].attn.pad_r == expect_w - input_w + + # stage 2 + input_h = int(224 / 4 / 3 / 3) + # input_h is smaller than window_size, shrink the window_size to input_h. 
+ expect_h = input_h + input_w = int(224 / 4 / 2 / 2) + expect_w = ceil(input_w / input_h) * input_h + assert model.stages[2].blocks[0].attn.pad_b == expect_h - input_h + assert model.stages[2].blocks[0].attn.pad_r == expect_w - input_w + + # stage 3 + input_h = int(224 / 4 / 3 / 3 / 3) + expect_h = input_h + input_w = int(224 / 4 / 2 / 2 / 2) + expect_w = ceil(input_w / input_h) * input_h + assert model.stages[3].blocks[0].attn.pad_b == expect_h - input_h + assert model.stages[3].blocks[0].attn.pad_r == expect_w - input_w + + # Test small with auto_pad = False + with pytest.raises(AssertionError): + model = SwinTransformer( + arch='small', + auto_pad=False, + stage_cfgs=dict( + block_cfgs={'window_size': 7}, + downsample_cfg={ + 'kernel_size': (3, 2), + })) + + # Test drop_path_rate decay + model = SwinTransformer( + arch='small', + drop_path_rate=0.2, + ) + depths = model.arch_settings['depths'] + pos = 0 + for i, depth in enumerate(depths): + for j in range(depth): + block = model.stages[i].blocks[j] + expect_prob = 0.2 / (sum(depths) - 1) * pos + assert np.isclose(block.ffn.dropout_layer.drop_prob, expect_prob) + assert np.isclose(block.attn.drop.drop_prob, expect_prob) + pos += 1 + + +def test_load_checkpoint(): + model = SwinTransformer(arch='tiny') + ckpt_path = os.path.join(tempfile.gettempdir(), 'ckpt.pth') + + assert model._version == 2 + + # test load v2 checkpoint + save_checkpoint(model, ckpt_path) + load_checkpoint(model, ckpt_path, strict=True) + + # test load v1 checkpoint + setattr(model, 'norm', model.norm3) + model._version = 1 + del model.norm3 + save_checkpoint(model, ckpt_path) + model = SwinTransformer(arch='tiny') + load_checkpoint(model, ckpt_path, strict=True) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_t2t_vit.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_t2t_vit.py new file mode 100644 index 0000000000..e15f92f9ae --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_t2t_vit.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from copy import deepcopy + +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import T2T_ViT + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_vit_backbone(): + + cfg_ori = dict( + img_size=224, + in_channels=3, + embed_dims=384, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=14, + layer_cfgs=dict( + num_heads=6, + feedforward_channels=3 * 384, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]) + + with pytest.raises(NotImplementedError): + # test if use performer + cfg = deepcopy(cfg_ori) + cfg['t2t_cfg']['use_performer'] = True + T2T_ViT(**cfg) + + # Test T2T-ViT model with input size of 224 + model = T2T_ViT(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(3, 3, 224, 224) + patch_token, cls_token = model(imgs)[-1] + assert cls_token.shape == (3, 384) + assert patch_token.shape == (3, 384, 14, 14) + + # Test custom arch T2T-ViT without output cls token + cfg = deepcopy(cfg_ori) + cfg['embed_dims'] = 256 + cfg['num_layers'] = 16 + cfg['layer_cfgs'] = dict(num_heads=8, feedforward_channels=1024) + cfg['output_cls_token'] = False + + model = T2T_ViT(**cfg) + patch_token = model(imgs)[-1] + assert patch_token.shape == (3, 256, 14, 14) + + # Test T2T_ViT with multi out indices + cfg = deepcopy(cfg_ori) + cfg['out_indices'] = [-3, -2, -1] + model = T2T_ViT(**cfg) + for out in model(imgs): + assert out[0].shape == (3, 384, 14, 14) + assert out[1].shape == (3, 384) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_timm_backbone.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_timm_backbone.py new file mode 100644 index 0000000000..dea62d2362 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_timm_backbone.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import TIMMBackbone + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_timm_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = TIMMBackbone() + model.init_weights(pretrained=0) + + # Test resnet18 from timm + model = TIMMBackbone(model_name='resnet18') + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 512, 7, 7)) + + # Test efficientnet_b1 with pretrained weights + model = TIMMBackbone(model_name='efficientnet_b1', pretrained=True) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 1280, 7, 7)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_tnt.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_tnt.py new file mode 100644 index 0000000000..2feffd6a75 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_tnt.py @@ -0,0 +1,50 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import TNT + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_tnt_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = TNT() + model.init_weights(pretrained=0) + + # Test tnt_base_patch16_224 + model = TNT() + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 640)) + + # Test tnt with embed_dims=768 + arch = { + 'embed_dims_outer': 768, + 'embed_dims_inner': 48, + 'num_layers': 12, + 'num_heads_outer': 6, + 'num_heads_inner': 4 + } + model = TNT(arch=arch) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 768)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vgg.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vgg.py new file mode 100644 index 0000000000..4e8177922b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vgg.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmcls.models.backbones import VGG + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_vgg(): + """Test VGG backbone.""" + with pytest.raises(KeyError): + # VGG depth should be in [11, 13, 16, 19] + VGG(18) + + with pytest.raises(AssertionError): + # In VGG: 1 <= num_stages <= 5 + VGG(11, num_stages=0) + + with pytest.raises(AssertionError): + # In VGG: 1 <= num_stages <= 5 + VGG(11, num_stages=6) + + with pytest.raises(AssertionError): + # len(dilations) == num_stages + VGG(11, dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = VGG(11) + model.init_weights(pretrained=0) + + # Test VGG11 norm_eval=True + model = VGG(11, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test VGG11 forward without classifiers + model = VGG(11, out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + + # Test VGG11 forward with classifiers + model = VGG(11, num_classes=10, out_indices=(0, 1, 2, 3, 4, 5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 6 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + assert feat[5].shape == (1, 10) + + # Test VGG11BN forward + model = VGG(11, norm_cfg=dict(type='BN'), out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + + # Test VGG11BN forward with classifiers + model = VGG( + 11, + num_classes=10, + norm_cfg=dict(type='BN'), + out_indices=(0, 1, 2, 3, 4, 5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 6 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + assert feat[5].shape == (1, 10) + + # Test VGG13 with layers 1, 2, 3 out forward + model = VGG(13, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + + # Test VGG16 with top feature maps out forward + model = VGG(16) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == (1, 512, 7, 7) + + # Test VGG19 with classification score out 
forward + model = VGG(19, num_classes=10) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == (1, 10) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vision_transformer.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vision_transformer.py new file mode 100644 index 0000000000..efa7375ccb --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_backbones/test_vision_transformer.py @@ -0,0 +1,162 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os +import tempfile +from copy import deepcopy + +import pytest +import torch +import torch.nn.functional as F +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.backbones import VisionTransformer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_vit_backbone(): + + cfg_ori = dict( + arch='b', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]) + + with pytest.raises(AssertionError): + # test invalid arch + cfg = deepcopy(cfg_ori) + cfg['arch'] = 'unknown' + VisionTransformer(**cfg) + + with pytest.raises(AssertionError): + # test arch without essential keys + cfg = deepcopy(cfg_ori) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + } + VisionTransformer(**cfg) + + # Test ViT base model with input size of 224 + # and patch size of 16 + model = VisionTransformer(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(3, 3, 224, 224) + patch_token, cls_token = model(imgs)[-1] + assert cls_token.shape == (3, 768) + assert patch_token.shape == (3, 768, 14, 14) + + # Test custom arch ViT without output cls token + cfg = deepcopy(cfg_ori) + cfg['arch'] = { + 'embed_dims': 128, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 1024 + } + cfg['output_cls_token'] = False + model = VisionTransformer(**cfg) + patch_token = model(imgs)[-1] + assert patch_token.shape == (3, 128, 14, 14) + + # Test ViT with multi out indices + cfg = deepcopy(cfg_ori) + cfg['out_indices'] = [-3, -2, -1] + model = VisionTransformer(**cfg) + for out in model(imgs): + assert out[0].shape == (3, 768, 14, 14) + assert out[1].shape == (3, 768) + + +def timm_resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): + # Timm version pos embed resize function. 
+ # Refers to https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py # noqa:E501 + ntok_new = posemb_new.shape[1] + if num_tokens: + posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, + num_tokens:] + ntok_new -= num_tokens + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, + -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate( + posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, + 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb + + +def test_vit_weight_init(): + # test weight init cfg + pretrain_cfg = dict( + arch='b', + img_size=224, + patch_size=16, + init_cfg=[dict(type='Constant', val=1., layer='Conv2d')]) + pretrain_model = VisionTransformer(**pretrain_cfg) + pretrain_model.init_weights() + assert torch.allclose(pretrain_model.patch_embed.projection.weight, + torch.tensor(1.)) + assert pretrain_model.pos_embed.abs().sum() > 0 + + pos_embed_weight = pretrain_model.pos_embed.detach() + tmpdir = tempfile.gettempdir() + checkpoint = os.path.join(tmpdir, 'test.pth') + torch.save(pretrain_model.state_dict(), checkpoint) + + # test load checkpoint + finetune_cfg = dict( + arch='b', + img_size=224, + patch_size=16, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint)) + finetune_model = VisionTransformer(**finetune_cfg) + finetune_model.init_weights() + assert torch.allclose(finetune_model.pos_embed, pos_embed_weight) + + # test load checkpoint with different img_size + finetune_cfg = dict( + arch='b', + img_size=384, + patch_size=16, + init_cfg=dict(type='Pretrained', checkpoint=checkpoint)) + finetune_model = VisionTransformer(**finetune_cfg) + finetune_model.init_weights() + resized_pos_embed = timm_resize_pos_embed(pos_embed_weight, + finetune_model.pos_embed) + assert torch.allclose(finetune_model.pos_embed, resized_pos_embed) + + os.remove(checkpoint) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_classifiers.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_classifiers.py new file mode 100644 index 0000000000..7b5df469d4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_classifiers.py @@ -0,0 +1,296 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +import tempfile +from copy import deepcopy + +import numpy as np +import pytest +import torch +from mmcv import ConfigDict +from mmcv.runner.base_module import BaseModule + +from mmcls.models import CLASSIFIERS +from mmcls.models.classifiers import ImageClassifier + + +def test_image_classifier(): + model_cfg = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss'))) + + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + model_cfg_ = deepcopy(model_cfg) + model = CLASSIFIERS.build(model_cfg_) + + # test property + assert model.with_neck + assert model.with_head + + # test train_step + outputs = model.train_step({'img': imgs, 'gt_label': label}, None) + assert outputs['loss'].item() > 0 + assert outputs['num_samples'] == 16 + + # test val_step + outputs = model.val_step({'img': imgs, 'gt_label': label}, None) + assert outputs['loss'].item() > 0 + assert outputs['num_samples'] == 16 + + # test forward + losses = model(imgs, return_loss=True, gt_label=label) + assert losses['loss'].item() > 0 + + # test forward_test + model_cfg_ = deepcopy(model_cfg) + model = CLASSIFIERS.build(model_cfg_) + pred = model(imgs, return_loss=False, img_metas=None) + assert isinstance(pred, list) and len(pred) == 16 + + single_img = torch.randn(1, 3, 32, 32) + pred = model(single_img, return_loss=False, img_metas=None) + assert isinstance(pred, list) and len(pred) == 1 + + # test pretrained + # TODO remove deprecated pretrained + with pytest.warns(UserWarning): + model_cfg_ = deepcopy(model_cfg) + model_cfg_['pretrained'] = 'checkpoint' + model = CLASSIFIERS.build(model_cfg_) + assert model.init_cfg == dict( + type='Pretrained', checkpoint='checkpoint') + + # test show_result + img = np.random.random_integers(0, 255, (224, 224, 3)).astype(np.uint8) + result = dict(pred_class='cat', pred_label=0, pred_score=0.9) + + with tempfile.TemporaryDirectory() as tmpdir: + out_file = osp.join(tmpdir, 'out.png') + model.show_result(img, result, out_file=out_file) + assert osp.exists(out_file) + + with tempfile.TemporaryDirectory() as tmpdir: + out_file = osp.join(tmpdir, 'out.png') + model.show_result(img, result, out_file=out_file) + assert osp.exists(out_file) + + +def test_image_classifier_with_mixup(): + # Test mixup in ImageClassifier + model_cfg = dict( + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, + use_soft=True)), + train_cfg=dict( + augments=dict( + type='BatchMixup', alpha=1., num_classes=10, prob=1.))) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + # Considering BC-breaking + # TODO remove deprecated mixup usage. 
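+    # The old-style `train_cfg` passes `mixup` directly instead of the unified
+    # `augments` entry; it should still build and produce a positive loss.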
+ model_cfg['train_cfg'] = dict(mixup=dict(alpha=1.0, num_classes=10)) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + +def test_image_classifier_with_cutmix(): + + # Test cutmix in ImageClassifier + model_cfg = dict( + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, + use_soft=True)), + train_cfg=dict( + augments=dict( + type='BatchCutMix', alpha=1., num_classes=10, prob=1.))) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + # Considering BC-breaking + # TODO remove deprecated mixup usage. + model_cfg['train_cfg'] = dict( + cutmix=dict(alpha=1.0, num_classes=10, cutmix_prob=1.0)) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + +def test_image_classifier_with_augments(): + + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + # Test cutmix and mixup in ImageClassifier + model_cfg = dict( + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, + use_soft=True)), + train_cfg=dict(augments=[ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3), + dict(type='Identity', num_classes=10, prob=0.2) + ])) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + # Test cutmix with cutmix_minmax in ImageClassifier + model_cfg['train_cfg'] = dict( + augments=dict( + type='BatchCutMix', + alpha=1., + num_classes=10, + prob=1., + cutmix_minmax=[0.2, 0.8])) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + # Test not using train_cfg + model_cfg = dict( + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0))) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + imgs = torch.randn(16, 3, 32, 32) + label = torch.randint(0, 10, (16, )) + + losses = img_classifier.forward_train(imgs, label) + assert losses['loss'].item() > 0 + + # Test not using cutmix and mixup in ImageClassifier + model_cfg['train_cfg'] = dict(augments=None) + img_classifier = ImageClassifier(**model_cfg) + img_classifier.init_weights() + + losses = img_classifier.forward_train(imgs, label) + assert 
losses['loss'].item() > 0 + + +def test_image_classifier_return_tuple(): + model_cfg = ConfigDict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch', + return_tuple=False), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss'))) + + imgs = torch.randn(16, 3, 32, 32) + + model_cfg_ = deepcopy(model_cfg) + with pytest.warns(DeprecationWarning): + model = CLASSIFIERS.build(model_cfg_) + + # test backbone return tensor + feat = model.extract_feat(imgs) + assert isinstance(feat, torch.Tensor) + + # test backbone return tuple + model_cfg_ = deepcopy(model_cfg) + model_cfg_.backbone.return_tuple = True + model = CLASSIFIERS.build(model_cfg_) + + feat = model.extract_feat(imgs) + assert isinstance(feat, tuple) + + # test warning if backbone return tensor + class ToyBackbone(BaseModule): + + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 16, 3) + + def forward(self, x): + return self.conv(x) + + model_cfg_ = deepcopy(model_cfg) + model_cfg_.backbone.return_tuple = True + model = CLASSIFIERS.build(model_cfg_) + model.backbone = ToyBackbone() + + with pytest.warns(DeprecationWarning): + model.extract_feat(imgs) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_heads.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_heads.py new file mode 100644 index 0000000000..c5c84e011a --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_heads.py @@ -0,0 +1,152 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest.mock import patch + +import pytest +import torch + +from mmcls.models.heads import (ClsHead, LinearClsHead, MultiLabelClsHead, + MultiLabelLinearClsHead, StackedLinearClsHead, + VisionTransformerClsHead) + + +@pytest.mark.parametrize('feat', [torch.rand(4, 3), (torch.rand(4, 3), )]) +def test_cls_head(feat): + + # test ClsHead with cal_acc=False + head = ClsHead() + fake_gt_label = torch.randint(0, 2, (4, )) + + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + # test ClsHead with cal_acc=True + head = ClsHead(cal_acc=True) + feat = torch.rand(4, 3) + fake_gt_label = torch.randint(0, 2, (4, )) + + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + +@pytest.mark.parametrize('feat', [torch.rand(4, 3), (torch.rand(4, 3), )]) +def test_linear_head(feat): + + fake_gt_label = torch.randint(0, 10, (4, )) + + # test LinearClsHead forward + head = LinearClsHead(10, 3) + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + # test init weights + head = LinearClsHead(10, 3) + head.init_weights() + assert abs(head.fc.weight).sum() > 0 + + # test simple_test + head = LinearClsHead(10, 3) + pred = head.simple_test(feat) + assert isinstance(pred, list) and len(pred) == 4 + + with patch('torch.onnx.is_in_onnx_export', return_value=True): + head = LinearClsHead(10, 3) + pred = head.simple_test(feat) + assert pred.shape == (4, 10) + + +@pytest.mark.parametrize('feat', [torch.rand(4, 3), (torch.rand(4, 3), )]) +def test_multilabel_head(feat): + head = MultiLabelClsHead() + fake_gt_label = torch.randint(0, 2, (4, 3)) + + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + +@pytest.mark.parametrize('feat', [torch.rand(4, 5), (torch.rand(4, 5), )]) +def 
test_multilabel_linear_head(feat): + head = MultiLabelLinearClsHead(3, 5) + fake_gt_label = torch.randint(0, 2, (4, 3)) + + head.init_weights() + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + +@pytest.mark.parametrize('feat', [torch.rand(4, 5), (torch.rand(4, 5), )]) +def test_stacked_linear_cls_head(feat): + # test assertion + with pytest.raises(AssertionError): + StackedLinearClsHead(num_classes=3, in_channels=5, mid_channels=10) + + with pytest.raises(AssertionError): + StackedLinearClsHead(num_classes=-1, in_channels=5, mid_channels=[10]) + + fake_gt_label = torch.randint(0, 2, (4, )) # B, num_classes + + # test forward with default setting + head = StackedLinearClsHead( + num_classes=3, in_channels=5, mid_channels=[10]) + head.init_weights() + + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + # test simple test + pred = head.simple_test(feat) + assert len(pred) == 4 + + # test simple test in tracing + with patch('torch.onnx.is_in_onnx_export', return_value=True): + pred = head.simple_test(feat) + assert pred.shape == torch.Size((4, 3)) + + # test forward with full function + head = StackedLinearClsHead( + num_classes=3, + in_channels=5, + mid_channels=[8, 10], + dropout_rate=0.2, + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='HSwish')) + head.init_weights() + + losses = head.forward_train(feat, fake_gt_label) + assert losses['loss'].item() > 0 + + +def test_vit_head(): + fake_features = ([torch.rand(4, 7, 7, 16), torch.rand(4, 100)], ) + fake_gt_label = torch.randint(0, 10, (4, )) + + # test vit head forward + head = VisionTransformerClsHead(10, 100) + losses = head.forward_train(fake_features, fake_gt_label) + assert not hasattr(head.layers, 'pre_logits') + assert not hasattr(head.layers, 'act') + assert losses['loss'].item() > 0 + + # test vit head forward with hidden layer + head = VisionTransformerClsHead(10, 100, hidden_dim=20) + losses = head.forward_train(fake_features, fake_gt_label) + assert hasattr(head.layers, 'pre_logits') and hasattr(head.layers, 'act') + assert losses['loss'].item() > 0 + + # test vit head init_weights + head = VisionTransformerClsHead(10, 100, hidden_dim=20) + head.init_weights() + assert abs(head.layers.pre_logits.weight).sum() > 0 + + # test simple_test + head = VisionTransformerClsHead(10, 100, hidden_dim=20) + pred = head.simple_test(fake_features) + assert isinstance(pred, list) and len(pred) == 4 + + with patch('torch.onnx.is_in_onnx_export', return_value=True): + head = VisionTransformerClsHead(10, 100, hidden_dim=20) + pred = head.simple_test(fake_features) + assert pred.shape == (4, 10) + + # test assertion + with pytest.raises(ValueError): + VisionTransformerClsHead(-1, 100) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_neck.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_neck.py new file mode 100644 index 0000000000..c7c3644366 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_neck.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
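+# Tests for the GlobalAveragePooling neck with pooling dims 1, 2 and 3, plus
+# the assertion on invalid `dim` values.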
+import pytest +import torch + +from mmcls.models.necks import GlobalAveragePooling + + +def test_gap_neck(): + + # test 1d gap_neck + neck = GlobalAveragePooling(dim=1) + # batch_size, num_features, feature_size + fake_input = torch.rand(1, 16, 24) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + # test 1d gap_neck + neck = GlobalAveragePooling(dim=2) + # batch_size, num_features, feature_size(2) + fake_input = torch.rand(1, 16, 24, 24) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + # test 1d gap_neck + neck = GlobalAveragePooling(dim=3) + # batch_size, num_features, feature_size(3) + fake_input = torch.rand(1, 16, 24, 24, 5) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + with pytest.raises(AssertionError): + # dim must in [1, 2, 3] + GlobalAveragePooling(dim='other') diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_attention.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_attention.py new file mode 100644 index 0000000000..271df90fe7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_attention.py @@ -0,0 +1,178 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmcls.models.utils.attention import ShiftWindowMSA, WindowMSA + + +def get_relative_position_index(window_size): + """Method from original code of Swin-Transformer.""" + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + # 2, Wh*Ww, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + # Wh*Ww, Wh*Ww, 2 + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + return relative_position_index + + +def test_window_msa(): + batch_size = 1 + num_windows = (4, 4) + embed_dims = 96 + window_size = (7, 7) + num_heads = 4 + attn = WindowMSA( + embed_dims=embed_dims, window_size=window_size, num_heads=num_heads) + inputs = torch.rand((batch_size * num_windows[0] * num_windows[1], + window_size[0] * window_size[1], embed_dims)) + + # test forward + output = attn(inputs) + assert output.shape == inputs.shape + assert attn.relative_position_bias_table.shape == ( + (2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) + + # test relative_position_bias_table init + attn.init_weights() + assert abs(attn.relative_position_bias_table).sum() > 0 + + # test non-square window_size + window_size = (6, 7) + attn = WindowMSA( + embed_dims=embed_dims, window_size=window_size, num_heads=num_heads) + inputs = torch.rand((batch_size * num_windows[0] * num_windows[1], + window_size[0] * window_size[1], embed_dims)) + output = attn(inputs) + assert output.shape == inputs.shape + + # test relative_position_index + expected_rel_pos_index = get_relative_position_index(window_size) + assert (attn.relative_position_index == expected_rel_pos_index).all() + + # test qkv_bias=True + attn = WindowMSA( + embed_dims=embed_dims, + window_size=window_size, + num_heads=num_heads, + 
qkv_bias=True) + assert attn.qkv.bias.shape == (embed_dims * 3, ) + + # test qkv_bias=False + attn = WindowMSA( + embed_dims=embed_dims, + window_size=window_size, + num_heads=num_heads, + qkv_bias=False) + assert attn.qkv.bias is None + + # test default qk_scale + attn = WindowMSA( + embed_dims=embed_dims, + window_size=window_size, + num_heads=num_heads, + qk_scale=None) + head_dims = embed_dims // num_heads + assert np.isclose(attn.scale, head_dims**-0.5) + + # test specified qk_scale + attn = WindowMSA( + embed_dims=embed_dims, + window_size=window_size, + num_heads=num_heads, + qk_scale=0.3) + assert attn.scale == 0.3 + + # test attn_drop + attn = WindowMSA( + embed_dims=embed_dims, + window_size=window_size, + num_heads=num_heads, + attn_drop=1.0) + inputs = torch.rand((batch_size * num_windows[0] * num_windows[1], + window_size[0] * window_size[1], embed_dims)) + # drop all attn output, output shuold be equal to proj.bias + assert torch.allclose(attn(inputs), attn.proj.bias) + + # test prob_drop + attn = WindowMSA( + embed_dims=embed_dims, + window_size=window_size, + num_heads=num_heads, + proj_drop=1.0) + assert (attn(inputs) == 0).all() + + +def test_shift_window_msa(): + batch_size = 1 + embed_dims = 96 + input_resolution = (14, 14) + num_heads = 4 + window_size = 7 + + # test forward + attn = ShiftWindowMSA( + embed_dims=embed_dims, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size) + inputs = torch.rand( + (batch_size, input_resolution[0] * input_resolution[1], embed_dims)) + output = attn(inputs) + assert output.shape == (inputs.shape) + assert attn.w_msa.relative_position_bias_table.shape == ((2 * window_size - + 1)**2, num_heads) + + # test forward with shift_size + attn = ShiftWindowMSA( + embed_dims=embed_dims, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=1) + output = attn(inputs) + assert output.shape == (inputs.shape) + + # test relative_position_bias_table init + attn.init_weights() + assert abs(attn.w_msa.relative_position_bias_table).sum() > 0 + + # test dropout_layer + attn = ShiftWindowMSA( + embed_dims=embed_dims, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + dropout_layer=dict(type='DropPath', drop_prob=0.5)) + torch.manual_seed(0) + output = attn(inputs) + assert (output == 0).all() + + # test auto_pad + input_resolution = (19, 18) + attn = ShiftWindowMSA( + embed_dims=embed_dims, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + auto_pad=True) + assert attn.pad_r == 3 + assert attn.pad_b == 2 + + # test small input_resolution + input_resolution = (5, 6) + attn = ShiftWindowMSA( + embed_dims=embed_dims, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + shift_size=3, + auto_pad=True) + assert attn.window_size == 5 + assert attn.shift_size == 0 diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_augment.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_augment.py new file mode 100644 index 0000000000..dd7e1e0bba --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_augment.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
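+# Tests for the Augments wrapper: single BatchCutMix/BatchMixup configs and
+# lists of augments (including Identity) applied to a batch of images/labels.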
+import torch + +from mmcls.models.utils import Augments + + +def test_augments(): + imgs = torch.randn(4, 3, 32, 32) + labels = torch.randint(0, 10, (4, )) + + # Test cutmix + augments_cfg = dict(type='BatchCutMix', alpha=1., num_classes=10, prob=1.) + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + # Test mixup + augments_cfg = dict(type='BatchMixup', alpha=1., num_classes=10, prob=1.) + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + # Test cutmixup + augments_cfg = [ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3) + ] + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + augments_cfg = [ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.5) + ] + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) + + augments_cfg = [ + dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5), + dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3), + dict(type='Identity', num_classes=10, prob=0.2) + ] + augs = Augments(augments_cfg) + mixed_imgs, mixed_labels = augs(imgs, labels) + assert mixed_imgs.shape == torch.Size((4, 3, 32, 32)) + assert mixed_labels.shape == torch.Size((4, 10)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_embed.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_embed.py new file mode 100644 index 0000000000..8dba06065d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_embed.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
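+# Tests for patch embedding utilities: PatchEmbed, HybridEmbed (patch tokens
+# taken from a CNN backbone) and the unfold-based PatchMerging downsampler.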
+import pytest +import torch + +from mmcls.models.backbones import VGG +from mmcls.models.utils import HybridEmbed, PatchEmbed, PatchMerging + + +def cal_unfold_dim(dim, kernel_size, stride, padding=0, dilation=1): + return (dim + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1 + + +def test_patch_embed(): + # Test PatchEmbed + patch_embed = PatchEmbed() + img = torch.randn(1, 3, 224, 224) + img = patch_embed(img) + assert img.shape == torch.Size((1, 196, 768)) + + # Test PatchEmbed with stride = 8 + conv_cfg = dict(kernel_size=16, stride=8) + patch_embed = PatchEmbed(conv_cfg=conv_cfg) + img = torch.randn(1, 3, 224, 224) + img = patch_embed(img) + assert img.shape == torch.Size((1, 729, 768)) + + +def test_hybrid_embed(): + # Test VGG11 HybridEmbed + backbone = VGG(11, norm_eval=True) + backbone.init_weights() + patch_embed = HybridEmbed(backbone) + img = torch.randn(1, 3, 224, 224) + img = patch_embed(img) + assert img.shape == torch.Size((1, 49, 768)) + + +def test_patch_merging(): + settings = dict( + input_resolution=(56, 56), in_channels=16, expansion_ratio=2) + downsample = PatchMerging(**settings) + + # test forward with wrong dims + with pytest.raises(AssertionError): + inputs = torch.rand((1, 16, 56 * 56)) + downsample(inputs) + + # test patch merging forward + inputs = torch.rand((1, 56 * 56, 16)) + out = downsample(inputs) + assert downsample.output_resolution == (28, 28) + assert out.shape == (1, 28 * 28, 32) + + # test different kernel_size in each direction + downsample = PatchMerging(kernel_size=(2, 3), **settings) + out = downsample(inputs) + expected_dim = cal_unfold_dim(56, 2, 2) * cal_unfold_dim(56, 3, 3) + assert downsample.sampler.kernel_size == (2, 3) + assert downsample.output_resolution == (cal_unfold_dim(56, 2, 2), + cal_unfold_dim(56, 3, 3)) + assert out.shape == (1, expected_dim, 32) + + # test default stride + downsample = PatchMerging(kernel_size=6, **settings) + assert downsample.sampler.stride == (6, 6) + + # test stride=3 + downsample = PatchMerging(kernel_size=6, stride=3, **settings) + out = downsample(inputs) + assert downsample.sampler.stride == (3, 3) + assert out.shape == (1, cal_unfold_dim(56, 6, stride=3)**2, 32) + + # test padding + downsample = PatchMerging(kernel_size=6, padding=2, **settings) + out = downsample(inputs) + assert downsample.sampler.padding == (2, 2) + assert out.shape == (1, cal_unfold_dim(56, 6, 6, padding=2)**2, 32) + + # test dilation + downsample = PatchMerging(kernel_size=6, dilation=2, **settings) + out = downsample(inputs) + assert downsample.sampler.dilation == (2, 2) + assert out.shape == (1, cal_unfold_dim(56, 6, 6, dilation=2)**2, 32) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_inverted_residual.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_inverted_residual.py new file mode 100644 index 0000000000..8c363279e2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_inverted_residual.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
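+# Tests for the InvertedResidual block: expand conv, residual shortcut,
+# optional SE layer, custom norm/activation configs and checkpointing.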
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import InvertedResidual, SELayer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def test_inverted_residual(): + + with pytest.raises(AssertionError): + # stride must be in [1, 2] + InvertedResidual(16, 16, 32, stride=3) + + with pytest.raises(AssertionError): + # se_cfg must be None or dict + InvertedResidual(16, 16, 32, se_cfg=list()) + + # Add expand conv if in_channels and mid_channels is not the same + assert InvertedResidual(32, 16, 32).with_expand_conv is False + assert InvertedResidual(16, 16, 32).with_expand_conv is True + + # Test InvertedResidual forward, stride=1 + block = InvertedResidual(16, 16, 32, stride=1) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert getattr(block, 'se', None) is None + assert block.with_res_shortcut + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward, stride=2 + block = InvertedResidual(16, 16, 32, stride=2) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert not block.with_res_shortcut + assert x_out.shape == torch.Size((1, 16, 28, 28)) + + # Test InvertedResidual forward with se layer + se_cfg = dict(channels=32) + block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert isinstance(block.se, SELayer) + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward without expand conv + block = InvertedResidual(32, 16, 32) + x = torch.randn(1, 32, 56, 56) + x_out = block(x) + assert getattr(block, 'expand_conv', None) is None + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward with GroupNorm + block = InvertedResidual( + 16, 16, 32, norm_cfg=dict(type='GN', num_groups=2)) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + for m in block.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward with HSigmoid + block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid')) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward with checkpoint + block = InvertedResidual(16, 16, 32, with_cp=True) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert block.with_cp + assert x_out.shape == torch.Size((1, 16, 56, 56)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_misc.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_misc.py new file mode 100644 index 0000000000..f9a9a97ffd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_misc.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
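+# Tests for small model utilities: make_divisible, channel_shuffle and
+# is_tracing (torch.jit tracing detection).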
+from distutils.version import LooseVersion + +import pytest +import torch + +from mmcls.models.utils import channel_shuffle, is_tracing, make_divisible + + +def test_make_divisible(): + # test min_value is None + result = make_divisible(34, 8, None) + assert result == 32 + + # test when new_value > min_ratio * value + result = make_divisible(10, 8, min_ratio=0.9) + assert result == 16 + + # test min_value = 0.8 + result = make_divisible(33, 8, min_ratio=0.8) + assert result == 32 + + +def test_channel_shuffle(): + x = torch.randn(1, 24, 56, 56) + with pytest.raises(AssertionError): + # num_channels should be divisible by groups + channel_shuffle(x, 7) + + groups = 3 + batch_size, num_channels, height, width = x.size() + channels_per_group = num_channels // groups + out = channel_shuffle(x, groups) + # test the output value when groups = 3 + for b in range(batch_size): + for c in range(num_channels): + c_out = c % channels_per_group * groups + c // channels_per_group + for i in range(height): + for j in range(width): + assert x[b, c, i, j] == out[b, c_out, i, j] + + +@pytest.mark.skipif( + LooseVersion(torch.__version__) < LooseVersion('1.6.0'), + reason='torch.jit.is_tracing is not available before 1.6.0') +def test_is_tracing(): + + def foo(x): + if is_tracing(): + return x + else: + return x.tolist() + + x = torch.rand(3) + # test without trace + assert isinstance(foo(x), list) + + # test with trace + traced_foo = torch.jit.trace(foo, (torch.rand(1), )) + assert isinstance(traced_foo(x), torch.Tensor) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_se.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_se.py new file mode 100644 index 0000000000..988db04860 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_models/test_utils/test_se.py @@ -0,0 +1,94 @@ +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcls.models.utils import SELayer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def test_se(): + with pytest.raises(AssertionError): + # base_channels must be a number + SELayer(16, squeeze_channels='32') + + with pytest.raises(AssertionError): + # base_channels must be None or a number larger than 0 + SELayer(16, squeeze_channels=-1) + + with pytest.raises(AssertionError): + # act_cfg must be two dict tuple + SELayer( + 16, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'), + dict(type='ReLU'))) + + # Test SELayer forward, channels=64 + input = torch.randn((4, 64, 112, 112)) + se = SELayer(64) + output = se(input) + assert se.conv1.out_channels == 8 + assert se.conv2.in_channels == 8 + assert output.shape == torch.Size((4, 64, 112, 112)) + + # Test SELayer forward, ratio=4 + input = torch.randn((4, 128, 112, 112)) + se = SELayer(128, ratio=4) + output = se(input) + assert se.conv1.out_channels == 32 + assert se.conv2.in_channels == 32 + assert output.shape == torch.Size((4, 128, 112, 112)) + + # Test SELayer forward, channels=54, ratio=4 + # channels cannot be divisible by ratio + input = torch.randn((1, 54, 76, 103)) + se = SELayer(54, ratio=4) + output = se(input) + assert se.conv1.out_channels == 16 + assert se.conv2.in_channels == 16 + assert output.shape == torch.Size((1, 54, 76, 103)) + + # Test SELayer forward, divisor=2 + se = SELayer(54, ratio=4, divisor=2) + output 
= se(input) + assert se.conv1.out_channels == 14 + assert se.conv2.in_channels == 14 + assert output.shape == torch.Size((1, 54, 76, 103)) + + # Test SELayer forward, squeeze_channels=25 + input = torch.randn((1, 128, 56, 56)) + se = SELayer(128, squeeze_channels=25) + output = se(input) + assert se.conv1.out_channels == 25 + assert se.conv2.in_channels == 25 + assert output.shape == torch.Size((1, 128, 56, 56)) + + # Test SELayer forward, not used ratio and divisor + input = torch.randn((1, 128, 56, 56)) + se = SELayer( + 128, + squeeze_channels=13, + ratio=4, + divisor=8, + ) + output = se(input) + assert se.conv1.out_channels == 13 + assert se.conv2.in_channels == 13 + assert output.shape == torch.Size((1, 128, 56, 56)) + + # Test SELayer with HSigmoid activate layer + input = torch.randn((4, 128, 56, 56)) + se = SELayer( + 128, + squeeze_channels=25, + act_cfg=(dict(type='ReLU'), dict(type='HSigmoid'))) + output = se(input) + assert se.conv1.out_channels == 25 + assert se.conv2.in_channels == 25 + assert output.shape == torch.Size((4, 128, 56, 56)) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_runtime/test_eval_hook.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_runtime/test_eval_hook.py new file mode 100644 index 0000000000..e096414b4b --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_runtime/test_eval_hook.py @@ -0,0 +1,219 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import tempfile +import warnings +from unittest.mock import MagicMock, patch + +import mmcv.runner +import pytest +import torch +import torch.nn as nn +from mmcv.runner import obj_from_dict +from torch.utils.data import DataLoader, Dataset + +from mmcls.apis import single_gpu_test + +# TODO import eval hooks from mmcv and delete them from mmcls +try: + from mmcv.runner.hooks import EvalHook, DistEvalHook + use_mmcv_hook = True +except ImportError: + warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from mmcls ' + 'will be deprecated.' 
+ 'Please install mmcv through master branch.') + from mmcls.core import EvalHook, DistEvalHook + use_mmcv_hook = False + + +class ExampleDataset(Dataset): + + def __getitem__(self, idx): + results = dict(img=torch.tensor([1]), img_metas=dict()) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.test_cfg = None + self.conv = nn.Conv2d(3, 3, 3) + + def forward(self, img, img_metas, test_mode=False, **kwargs): + return img + + def train_step(self, data_batch, optimizer): + loss = self.forward(**data_batch) + return dict(loss=loss) + + +def test_iter_eval_hook(): + with pytest.raises(TypeError): + test_dataset = ExampleModel() + data_loader = [ + DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_worker=0, + shuffle=False) + ] + EvalHook(data_loader, by_epoch=False) + + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + loader = DataLoader(test_dataset, batch_size=1) + model = ExampleModel() + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + # test EvalHook + with tempfile.TemporaryDirectory() as tmpdir: + eval_hook = EvalHook(data_loader, by_epoch=False) + runner = mmcv.runner.IterBasedRunner( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger(), + max_iters=1) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)], 1) + test_dataset.evaluate.assert_called_with([torch.tensor([1])], + logger=runner.logger) + + +def test_epoch_eval_hook(): + with pytest.raises(TypeError): + test_dataset = ExampleModel() + data_loader = [ + DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_worker=0, + shuffle=False) + ] + EvalHook(data_loader, by_epoch=True) + + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + loader = DataLoader(test_dataset, batch_size=1) + model = ExampleModel() + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + # test EvalHook with interval + with tempfile.TemporaryDirectory() as tmpdir: + eval_hook = EvalHook(data_loader, by_epoch=True, interval=2) + runner = mmcv.runner.EpochBasedRunner( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger(), + max_epochs=2) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)]) + test_dataset.evaluate.assert_called_once_with([torch.tensor([1])], + logger=runner.logger) + + +def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + results = single_gpu_test(model, data_loader) + return results + + +@patch('mmcls.apis.multi_gpu_test', multi_gpu_test) +def test_dist_eval_hook(): + with pytest.raises(TypeError): + test_dataset = ExampleModel() + data_loader = [ + DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_worker=0, + shuffle=False) + ] + DistEvalHook(data_loader, by_epoch=False) + + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + loader = DataLoader(test_dataset, batch_size=1) + model = 
ExampleModel() + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + # test DistEvalHook + with tempfile.TemporaryDirectory() as tmpdir: + if use_mmcv_hook: + p = patch('mmcv.engine.multi_gpu_test', multi_gpu_test) + p.start() + eval_hook = DistEvalHook(data_loader, by_epoch=False) + runner = mmcv.runner.IterBasedRunner( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger(), + max_iters=1) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)]) + test_dataset.evaluate.assert_called_with([torch.tensor([1])], + logger=runner.logger) + if use_mmcv_hook: + p.stop() + + +@patch('mmcls.apis.multi_gpu_test', multi_gpu_test) +def test_dist_eval_hook_epoch(): + with pytest.raises(TypeError): + test_dataset = ExampleModel() + data_loader = [ + DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_worker=0, + shuffle=False) + ] + DistEvalHook(data_loader) + + test_dataset = ExampleDataset() + test_dataset.evaluate = MagicMock(return_value=dict(test='success')) + loader = DataLoader(test_dataset, batch_size=1) + model = ExampleModel() + data_loader = DataLoader( + test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False) + optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) + optimizer = obj_from_dict(optim_cfg, torch.optim, + dict(params=model.parameters())) + + # test DistEvalHook + with tempfile.TemporaryDirectory() as tmpdir: + if use_mmcv_hook: + p = patch('mmcv.engine.multi_gpu_test', multi_gpu_test) + p.start() + eval_hook = DistEvalHook(data_loader, by_epoch=True, interval=2) + runner = mmcv.runner.EpochBasedRunner( + model=model, + optimizer=optimizer, + work_dir=tmpdir, + logger=logging.getLogger(), + max_epochs=2) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)]) + test_dataset.evaluate.assert_called_with([torch.tensor([1])], + logger=runner.logger) + if use_mmcv_hook: + p.stop() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_version_utils.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_version_utils.py new file mode 100644 index 0000000000..f4bb389228 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_version_utils.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
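+# digit_version converts a version string into a fixed-length tuple so that
+# versions compare numerically; pre-release tags (dev/a/b/rc) sort below the
+# release through negative components, e.g. '1.0rc0' -> (1, 0, 0, 0, -1, 0).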
+from mmcls import digit_version + + +def test_digit_version(): + assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0) + assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0) + assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0) + assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1) + assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0) + assert digit_version('1.0') == digit_version('1.0.0') + assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5') + assert digit_version('1.0.0dev') < digit_version('1.0.0a') + assert digit_version('1.0.0a') < digit_version('1.0.0a1') + assert digit_version('1.0.0a') < digit_version('1.0.0b') + assert digit_version('1.0.0b') < digit_version('1.0.0rc') + assert digit_version('1.0.0rc1') < digit_version('1.0.0') + assert digit_version('1.0.0') < digit_version('1.0.0post') + assert digit_version('1.0.0post') < digit_version('1.0.0post1') + assert digit_version('v1') == (1, 0, 0, 0, 0, 0) + assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_visualization.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_visualization.py new file mode 100644 index 0000000000..8be4719a65 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tests/test_utils/test_visualization.py @@ -0,0 +1,106 @@ +# Copyright (c) Open-MMLab. All rights reserved. +import os +import os.path as osp +import shutil +import tempfile +from unittest.mock import Mock, patch + +import matplotlib.pyplot as plt +import mmcv +import numpy as np +import pytest + +from mmcls.core import visualization as vis + + +def test_color(): + assert vis.color_val_matplotlib(mmcv.Color.blue) == (0., 0., 1.) + assert vis.color_val_matplotlib('green') == (0., 1., 0.) + assert vis.color_val_matplotlib((1, 2, 3)) == (3 / 255, 2 / 255, 1 / 255) + assert vis.color_val_matplotlib(100) == (100 / 255, 100 / 255, 100 / 255) + assert vis.color_val_matplotlib(np.zeros(3, dtype=int)) == (0., 0., 0.) 
+ # forbid white color + with pytest.raises(TypeError): + vis.color_val_matplotlib([255, 255, 255]) + # forbid float + with pytest.raises(TypeError): + vis.color_val_matplotlib(1.0) + # overflowed + with pytest.raises(AssertionError): + vis.color_val_matplotlib((0, 0, 500)) + + +def test_imshow_infos(): + tmp_dir = osp.join(tempfile.gettempdir(), 'image_infos') + tmp_filename = osp.join(tmp_dir, 'image.jpg') + + image = np.ones((10, 10, 3), np.uint8) + result = {'pred_label': 1, 'pred_class': 'bird', 'pred_score': 0.98} + out_image = vis.imshow_infos( + image, result, out_file=tmp_filename, show=False) + assert osp.isfile(tmp_filename) + assert image.shape == out_image.shape + assert not np.allclose(image, out_image) + os.remove(tmp_filename) + + # test grayscale images + image = np.ones((10, 10), np.uint8) + result = {'pred_label': 1, 'pred_class': 'bird', 'pred_score': 0.98} + out_image = vis.imshow_infos( + image, result, out_file=tmp_filename, show=False) + assert osp.isfile(tmp_filename) + assert image.shape == out_image.shape[:2] + os.remove(tmp_filename) + + # test show=True + image = np.ones((10, 10, 3), np.uint8) + result = {'pred_label': 1, 'pred_class': 'bird', 'pred_score': 0.98} + + def mock_blocking_input(self, n=1, timeout=30): + keypress = Mock() + keypress.key = ' ' + out_path = osp.join(tmp_dir, '_'.join([str(n), str(timeout)])) + with open(out_path, 'w') as f: + f.write('test') + return [keypress] + + with patch('matplotlib.blocking_input.BlockingInput.__call__', + mock_blocking_input): + vis.imshow_infos(image, result, show=True, wait_time=5) + assert osp.exists(osp.join(tmp_dir, '1_0')) + + shutil.rmtree(tmp_dir) + + +@patch( + 'matplotlib.blocking_input.BlockingInput.__call__', + return_value=[Mock(key=' ')]) +def test_context_manager(mock_blocking_input): + # test show multiple images with the same figure. + images = [ + np.random.randint(0, 255, (100, 100, 3), np.uint8) for _ in range(5) + ] + result = {'pred_label': 1, 'pred_class': 'bird', 'pred_score': 0.98} + + with vis.ImshowInfosContextManager() as manager: + fig_show = manager.fig_show + fig_save = manager.fig_save + for image in images: + out_image = manager.put_img_infos(image, result, show=True) + assert image.shape == out_image.shape + assert not np.allclose(image, out_image) + assert fig_show is manager.fig_show + assert fig_save is manager.fig_save + + # test rebuild figure if user destroyed it. + with vis.ImshowInfosContextManager() as manager: + fig_save = manager.fig_save + for image in images: + fig_show = manager.fig_show + plt.close(manager.fig_show) + + out_image = manager.put_img_infos(image, result, show=True) + assert image.shape == out_image.shape + assert not np.allclose(image, out_image) + assert not (fig_show is manager.fig_show) + assert fig_save is manager.fig_save diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_logs.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_logs.py new file mode 100644 index 0000000000..00444e9a3c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_logs.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
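+# Analyze JSON logs produced during training. Example invocations (log paths
+# are placeholders):
+#   python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss --out curve.png
+#   python tools/analysis_tools/analyze_logs.py cal_train_time log.json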
+import argparse +import json +from collections import defaultdict + +import matplotlib.pyplot as plt +import numpy as np +import seaborn as sns + + +def cal_train_time(log_dicts, args): + """Compute the average time per training iteration.""" + for i, log_dict in enumerate(log_dicts): + print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') + all_times = [] + for epoch in log_dict.keys(): + if args.include_outliers: + all_times.append(log_dict[epoch]['time']) + else: + all_times.append(log_dict[epoch]['time'][1:]) + all_times = np.array(all_times) + epoch_ave_time = all_times.mean(-1) + slowest_epoch = epoch_ave_time.argmax() + fastest_epoch = epoch_ave_time.argmin() + std_over_epoch = epoch_ave_time.std() + print(f'slowest epoch {slowest_epoch + 1}, ' + f'average time is {epoch_ave_time[slowest_epoch]:.4f}') + print(f'fastest epoch {fastest_epoch + 1}, ' + f'average time is {epoch_ave_time[fastest_epoch]:.4f}') + print(f'time std over epochs is {std_over_epoch:.4f}') + print(f'average iter time: {np.mean(all_times):.4f} s/iter') + print() + + +def plot_curve(log_dicts, args): + """Plot train metric-iter graph.""" + if args.backend is not None: + plt.switch_backend(args.backend) + sns.set_style(args.style) + # if legend is None, use {filename}_{key} as legend + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + legend.append(f'{json_log}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print(f'plot curve of {args.json_logs[i]}, metric is {metric}') + if metric not in log_dict[epochs[0]]: + raise KeyError( + f'{args.json_logs[i]} does not contain metric {metric} ' + f'in train mode') + + if 'mAP' in metric: + xs = np.arange(1, max(epochs) + 1) + ys = [] + for epoch in epochs: + ys += log_dict[epoch][metric] + ax = plt.gca() + ax.set_xticks(xs) + plt.xlabel('epoch') + plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') + else: + xs = [] + ys = [] + num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1] + for epoch in epochs: + iters = log_dict[epoch]['iter'] + if log_dict[epoch]['mode'][-1] == 'val': + iters = iters[:-1] + xs.append( + np.array(iters) + (epoch - 1) * num_iters_per_epoch) + ys.append(np.array(log_dict[epoch][metric][:len(iters)])) + xs = np.concatenate(xs) + ys = np.concatenate(ys) + plt.xlabel('iter') + plt.plot( + xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['loss'], + help='the metric that you want to plot') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='dark', help='style of plt') + 
parser_plt.add_argument('--out', type=str, default=None) + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics, e.g. memory, bbox_mAP + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + for line in log_file: + log = json.loads(line.strip()) + # skip lines without `epoch` field + if 'epoch' not in log: + continue + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = load_json_logs(json_logs) + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_results.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_results.py new file mode 100644 index 0000000000..9f63439e9c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/analyze_results.py @@ -0,0 +1,126 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import warnings + +import mmcv +from mmcv import DictAction + +from mmcls.datasets import build_dataset +from mmcls.models import build_classifier + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMCls evaluate prediction success/fail') + parser.add_argument('config', help='test config file path') + parser.add_argument('result', help='test result json/pkl file') + parser.add_argument('--out-dir', help='dir to store output files') + parser.add_argument( + '--topk', + default=20, + type=int, + help='Number of images to select for success/fail') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file (deprecate), ' + 'change to --cfg-options instead.') + args = parser.parse_args() + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def save_imgs(result_dir, folder_name, results, model): + full_dir = osp.join(result_dir, folder_name) + mmcv.mkdir_or_exist(full_dir) + mmcv.dump(results, osp.join(full_dir, folder_name + '.json')) + + # save imgs + show_keys = ['pred_score', 'pred_class', 'gt_class'] + for result in results: + result_show = dict((k, v) for k, v in result.items() if k in show_keys) + outfile = osp.join(full_dir, osp.basename(result['filename'])) + model.show_result(result['filename'], result_show, out_file=outfile) + + +def main(): + args = parse_args() + + # load test results + outputs = mmcv.load(args.result) + assert ('pred_score' in outputs and 'pred_class' in outputs + and 'pred_label' in outputs), \ + 'No "pred_label", "pred_score" or "pred_class" in result file, ' \ + 'please set "--out-items" in test.py' + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + model = build_classifier(cfg.model) + + # build the dataloader + dataset = build_dataset(cfg.data.test) + filenames = list() + for info in dataset.data_infos: + if info['img_prefix'] is not None: + filename = osp.join(info['img_prefix'], + info['img_info']['filename']) + else: + filename = info['img_info']['filename'] + filenames.append(filename) + gt_labels = list(dataset.get_gt_labels()) + gt_classes = [dataset.CLASSES[x] for x in gt_labels] + + outputs['filename'] = filenames + outputs['gt_label'] = gt_labels + outputs['gt_class'] = gt_classes + + outputs_list = list() + for i in range(len(gt_labels)): + output = dict() + for k in outputs.keys(): + output[k] = outputs[k][i] + outputs_list.append(output) + + # sort result + outputs_list = sorted(outputs_list, key=lambda x: x['pred_score']) + + success = list() + fail = list() + for output in outputs_list: + if output['pred_label'] == output['gt_label']: + success.append(output) + else: + fail.append(output) + + success = success[:args.topk] + fail = fail[:args.topk] + + save_imgs(args.out_dir, 'success', success, model) + save_imgs(args.out_dir, 'fail', fail, model) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/eval_metric.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/eval_metric.py new file mode 100644 index 0000000000..c5a5c7a6d4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/eval_metric.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
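+# Offline evaluation of predictions saved by test.py; the pkl file must
+# contain "class_scores" (see the "--out-items" option of test.py). Example
+# (paths and metric are placeholders):
+#   python tools/analysis_tools/eval_metric.py config.py results.pkl --metrics accuracy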
+import argparse + +import mmcv +from mmcv import Config, DictAction + +from mmcls.datasets import build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser(description='Evaluate metric of the ' + 'results saved in pkl format') + parser.add_argument('config', help='Config of the model') + parser.add_argument('pkl_results', help='Results in pickle format') + parser.add_argument( + '--metrics', + type=str, + nargs='+', + help='Evaluation metrics, which depends on the dataset, e.g., ' + '"accuracy", "precision", "recall" and "support".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + outputs = mmcv.load(args.pkl_results) + assert 'class_scores' in outputs, \ + 'No "class_scores" in result file, please set "--out-items" in test.py' + + cfg = Config.fromfile(args.config) + assert args.metrics, ( + 'Please specify at least one metric the argument "--metrics".') + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # import modules from string list. + if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + cfg.data.test.test_mode = True + + dataset = build_dataset(cfg.data.test) + pred_score = outputs['class_scores'] + + kwargs = {} if args.eval_options is None else args.eval_options + eval_kwargs = cfg.get('evaluation', {}).copy() + # hard-code way to remove EvalHook args + for key in [ + 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule' + ]: + eval_kwargs.pop(key, None) + eval_kwargs.update(dict(metric=args.metrics, **kwargs)) + print(dataset.evaluate(pred_score, **eval_kwargs)) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/get_flops.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/get_flops.py new file mode 100644 index 0000000000..58683d9823 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/analysis_tools/get_flops.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
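+# Report parameter count and FLOPs of a classifier using
+# mmcv's get_model_complexity_info. Example (config path is a placeholder):
+#   python tools/analysis_tools/get_flops.py path/to/config.py --shape 224 224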
+import argparse
+
+from mmcv import Config
+from mmcv.cnn.utils import get_model_complexity_info
+
+from mmcls.models import build_classifier
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Get model flops and params')
+    parser.add_argument('config', help='config file path')
+    parser.add_argument(
+        '--shape',
+        type=int,
+        nargs='+',
+        default=[224, 224],
+        help='input image size')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+
+    args = parse_args()
+
+    if len(args.shape) == 1:
+        input_shape = (3, args.shape[0], args.shape[0])
+    elif len(args.shape) == 2:
+        input_shape = (3, ) + tuple(args.shape)
+    else:
+        raise ValueError('invalid input shape')
+
+    cfg = Config.fromfile(args.config)
+    model = build_classifier(cfg.model)
+    model.eval()
+
+    if hasattr(model, 'extract_feat'):
+        model.forward = model.extract_feat
+    else:
+        raise NotImplementedError(
+            'FLOPs counter is not currently supported with {}'.
+            format(model.__class__.__name__))
+
+    flops, params = get_model_complexity_info(model, input_shape)
+    split_line = '=' * 30
+    print(f'{split_line}\nInput shape: {input_shape}\n'
+          f'Flops: {flops}\nParams: {params}\n{split_line}')
+    print('!!!Please be cautious if you use the results in papers. '
+          'You may need to check if all ops are supported and verify that the '
+          'flops computation is correct.')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/mobilenetv2_to_mmcls.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/mobilenetv2_to_mmcls.py
new file mode 100644
index 0000000000..7f6654eda7
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/mobilenetv2_to_mmcls.py
@@ -0,0 +1,135 @@
+# Copyright (c) OpenMMLab. All rights reserved.
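get_flops.py above accepts --shape as either one or two integers and expands it to a channels-first (C, H, W) tuple before handing it to mmcv's get_model_complexity_info. A minimal sketch of that shape handling (pure Python, values are illustrative):

# Sketch of the --shape expansion used by the FLOPs tool above:
# one value means a square input, two values are taken as (H, W).
def to_input_shape(shape):
    if len(shape) == 1:          # e.g. --shape 224  -> square input
        return (3, shape[0], shape[0])
    elif len(shape) == 2:        # e.g. --shape 256 192 -> (C, H, W)
        return (3,) + tuple(shape)
    raise ValueError('invalid input shape')

print(to_input_shape([224]))       # (3, 224, 224)
print(to_input_shape([256, 192]))  # (3, 256, 192)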
+import argparse +from collections import OrderedDict + +import torch + + +def convert_conv1(model_key, model_weight, state_dict, converted_names): + if model_key.find('features.0.0') >= 0: + new_key = model_key.replace('features.0.0', 'backbone.conv1.conv') + else: + new_key = model_key.replace('features.0.1', 'backbone.conv1.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_conv5(model_key, model_weight, state_dict, converted_names): + if model_key.find('features.18.0') >= 0: + new_key = model_key.replace('features.18.0', 'backbone.conv2.conv') + else: + new_key = model_key.replace('features.18.1', 'backbone.conv2.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('classifier.1', 'head.fc') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_block(model_key, model_weight, state_dict, converted_names): + split_keys = model_key.split('.') + layer_id = int(split_keys[1]) + new_layer_id = 0 + sub_id = 0 + if layer_id == 1: + new_layer_id = 1 + sub_id = 0 + elif layer_id in range(2, 4): + new_layer_id = 2 + sub_id = layer_id - 2 + elif layer_id in range(4, 7): + new_layer_id = 3 + sub_id = layer_id - 4 + elif layer_id in range(7, 11): + new_layer_id = 4 + sub_id = layer_id - 7 + elif layer_id in range(11, 14): + new_layer_id = 5 + sub_id = layer_id - 11 + elif layer_id in range(14, 17): + new_layer_id = 6 + sub_id = layer_id - 14 + elif layer_id == 17: + new_layer_id = 7 + sub_id = 0 + + new_key = model_key.replace(f'features.{layer_id}', + f'backbone.layer{new_layer_id}.{sub_id}') + if new_layer_id == 1: + if new_key.find('conv.0.0') >= 0: + new_key = new_key.replace('conv.0.0', 'conv.0.conv') + elif new_key.find('conv.0.1') >= 0: + new_key = new_key.replace('conv.0.1', 'conv.0.bn') + elif new_key.find('conv.1') >= 0: + new_key = new_key.replace('conv.1', 'conv.1.conv') + elif new_key.find('conv.2') >= 0: + new_key = new_key.replace('conv.2', 'conv.1.bn') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + else: + if new_key.find('conv.0.0') >= 0: + new_key = new_key.replace('conv.0.0', 'conv.0.conv') + elif new_key.find('conv.0.1') >= 0: + new_key = new_key.replace('conv.0.1', 'conv.0.bn') + elif new_key.find('conv.1.0') >= 0: + new_key = new_key.replace('conv.1.0', 'conv.1.conv') + elif new_key.find('conv.1.1') >= 0: + new_key = new_key.replace('conv.1.1', 'conv.1.bn') + elif new_key.find('conv.2') >= 0: + new_key = new_key.replace('conv.2', 'conv.2.conv') + elif new_key.find('conv.3') >= 0: + new_key = new_key.replace('conv.3', 'conv.2.bn') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + print(f'Convert {model_key} to {new_key}') + state_dict[new_key] = model_weight + converted_names.add(model_key) + + +def convert(src, dst): + """Convert keys in torchvision pretrained MobileNetV2 models to mmcls + style.""" + + # load pytorch model + blobs = torch.load(src, map_location='cpu') + + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + + for key, weight in blobs.items(): + if 'features.0' in key: + convert_conv1(key, weight, state_dict, converted_names) + elif 'classifier' in key: + convert_head(key, weight, state_dict, converted_names) + elif 'features.18' in key: + 
convert_conv5(key, weight, state_dict, converted_names) + else: + convert_block(key, weight, state_dict, converted_names) + + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'not converted: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/publish_model.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/publish_model.py new file mode 100644 index 0000000000..a80f3e2964 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/publish_model.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import datetime +import subprocess +from pathlib import Path + +import torch +from mmcv import digit_version + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + if digit_version(torch.__version__) >= digit_version('1.6'): + torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) + else: + torch.save(checkpoint, out_file) + + sha = subprocess.check_output(['sha256sum', out_file]).decode() + if out_file.endswith('.pth'): + out_file_name = out_file[:-4] + else: + out_file_name = out_file + + current_date = datetime.datetime.now().strftime('%Y%m%d') + final_file = out_file_name + f'_{current_date}-{sha[:8]}.pth' + subprocess.Popen(['mv', out_file, final_file]) + + print(f'Successfully generated the publish-ckpt as {final_file}.') + + +def main(): + args = parse_args() + out_dir = Path(args.out_file).parent + if not out_dir.exists(): + raise ValueError(f'Directory {out_dir} does not exist, ' + 'please generate it manually.') + process_checkpoint(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/reparameterize_repvgg.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/reparameterize_repvgg.py new file mode 100644 index 0000000000..0eb7b203b4 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/reparameterize_repvgg.py @@ -0,0 +1,46 @@ +import argparse +from pathlib import Path + +import torch + +from mmcls.apis import init_model + + +def convert_repvggblock_param(config_path, checkpoint_path, save_path): + model = init_model(config_path, checkpoint=checkpoint_path) + print('Converting...') + + model.backbone.switch_to_deploy() + torch.save(model.state_dict(), save_path) + + print('Done! 
Save at path "{}"'.format(save_path)) + + +def main(): + parser = argparse.ArgumentParser( + description='Convert the parameters of the repvgg block ' + 'from training mode to deployment mode.') + parser.add_argument( + 'config_path', + help='The path to the configuration file of the network ' + 'containing the repvgg block.') + parser.add_argument( + 'checkpoint_path', + help='The path to the checkpoint file corresponding to the model.') + parser.add_argument( + 'save_path', + help='The path where the converted checkpoint file is stored.') + args = parser.parse_args() + + save_path = Path(args.save_path) + if save_path.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit() + save_path.parent.mkdir(parents=True, exist_ok=True) + + convert_repvggblock_param(args.config_path, args.checkpoint_path, + args.save_path) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/repvgg_to_mmcls.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/repvgg_to_mmcls.py new file mode 100644 index 0000000000..fb28101083 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/repvgg_to_mmcls.py @@ -0,0 +1,59 @@ +import argparse +from collections import OrderedDict +from pathlib import Path + +import torch + + +def convert(src, dst): + print('Converting...') + blobs = torch.load(src, map_location='cpu') + converted_state_dict = OrderedDict() + + for key in blobs: + splited_key = key.split('.') + splited_key = ['norm' if i == 'bn' else i for i in splited_key] + splited_key = [ + 'branch_norm' if i == 'rbr_identity' else i for i in splited_key + ] + splited_key = [ + 'branch_1x1' if i == 'rbr_1x1' else i for i in splited_key + ] + splited_key = [ + 'branch_3x3' if i == 'rbr_dense' else i for i in splited_key + ] + splited_key = [ + 'backbone.stem' if i[:6] == 'stage0' else i for i in splited_key + ] + splited_key = [ + 'backbone.stage_' + i[5] if i[:5] == 'stage' else i + for i in splited_key + ] + splited_key = ['se_layer' if i == 'se' else i for i in splited_key] + splited_key = ['conv1.conv' if i == 'down' else i for i in splited_key] + splited_key = ['conv2.conv' if i == 'up' else i for i in splited_key] + splited_key = ['head.fc' if i == 'linear' else i for i in splited_key] + new_key = '.'.join(splited_key) + converted_state_dict[new_key] = blobs[key] + + torch.save(converted_state_dict, dst) + print('Done!') + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit() + dst.parent.mkdir(parents=True, exist_ok=True) + + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/shufflenetv2_to_mmcls.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/shufflenetv2_to_mmcls.py new file mode 100644 index 0000000000..69046c364c --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/shufflenetv2_to_mmcls.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +from collections import OrderedDict + +import torch + + +def convert_conv1(model_key, model_weight, state_dict, converted_names): + if model_key.find('conv1.0') >= 0: + new_key = model_key.replace('conv1.0', 'backbone.conv1.conv') + else: + new_key = model_key.replace('conv1.1', 'backbone.conv1.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_conv5(model_key, model_weight, state_dict, converted_names): + if model_key.find('conv5.0') >= 0: + new_key = model_key.replace('conv5.0', 'backbone.layers.3.conv') + else: + new_key = model_key.replace('conv5.1', 'backbone.layers.3.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('fc', 'head.fc') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_block(model_key, model_weight, state_dict, converted_names): + split_keys = model_key.split('.') + layer, block, branch = split_keys[:3] + layer_id = int(layer[-1]) - 2 + new_key = model_key.replace(layer, f'backbone.layers.{layer_id}') + + if branch == 'branch1': + if new_key.find('branch1.0') >= 0: + new_key = new_key.replace('branch1.0', 'branch1.0.conv') + elif new_key.find('branch1.1') >= 0: + new_key = new_key.replace('branch1.1', 'branch1.0.bn') + elif new_key.find('branch1.2') >= 0: + new_key = new_key.replace('branch1.2', 'branch1.1.conv') + elif new_key.find('branch1.3') >= 0: + new_key = new_key.replace('branch1.3', 'branch1.1.bn') + elif branch == 'branch2': + + if new_key.find('branch2.0') >= 0: + new_key = new_key.replace('branch2.0', 'branch2.0.conv') + elif new_key.find('branch2.1') >= 0: + new_key = new_key.replace('branch2.1', 'branch2.0.bn') + elif new_key.find('branch2.3') >= 0: + new_key = new_key.replace('branch2.3', 'branch2.1.conv') + elif new_key.find('branch2.4') >= 0: + new_key = new_key.replace('branch2.4', 'branch2.1.bn') + elif new_key.find('branch2.5') >= 0: + new_key = new_key.replace('branch2.5', 'branch2.2.conv') + elif new_key.find('branch2.6') >= 0: + new_key = new_key.replace('branch2.6', 'branch2.2.bn') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + print(f'Convert {model_key} to {new_key}') + state_dict[new_key] = model_weight + converted_names.add(model_key) + + +def convert(src, dst): + """Convert keys in torchvision pretrained ShuffleNetV2 models to mmcls + style.""" + + # load pytorch model + blobs = torch.load(src, map_location='cpu') + + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + + for key, weight in blobs.items(): + if 'conv1' in key: + convert_conv1(key, weight, state_dict, converted_names) + elif 'fc' in key: + convert_head(key, weight, state_dict, converted_names) + elif key.startswith('s'): + convert_block(key, weight, state_dict, converted_names) + elif 'conv5' in key: + convert_conv5(key, weight, state_dict, converted_names) + + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'not converted: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + 
parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/vgg_to_mmcls.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/vgg_to_mmcls.py new file mode 100644 index 0000000000..56f5caf401 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/convert_models/vgg_to_mmcls.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +from collections import OrderedDict + +import torch + + +def get_layer_maps(layer_num, with_bn): + layer_maps = {'conv': {}, 'bn': {}} + if with_bn: + if layer_num == 11: + layer_idxs = [0, 4, 8, 11, 15, 18, 22, 25] + elif layer_num == 13: + layer_idxs = [0, 3, 7, 10, 14, 17, 21, 24, 28, 31] + elif layer_num == 16: + layer_idxs = [0, 3, 7, 10, 14, 17, 20, 24, 27, 30, 34, 37, 40] + elif layer_num == 19: + layer_idxs = [ + 0, 3, 7, 10, 14, 17, 20, 23, 27, 30, 33, 36, 40, 43, 46, 49 + ] + else: + raise ValueError(f'Invalid number of layers: {layer_num}') + for i, layer_idx in enumerate(layer_idxs): + if i == 0: + new_layer_idx = layer_idx + else: + new_layer_idx += int((layer_idx - layer_idxs[i - 1]) / 2) + layer_maps['conv'][layer_idx] = new_layer_idx + layer_maps['bn'][layer_idx + 1] = new_layer_idx + else: + if layer_num == 11: + layer_idxs = [0, 3, 6, 8, 11, 13, 16, 18] + new_layer_idxs = [0, 2, 4, 5, 7, 8, 10, 11] + elif layer_num == 13: + layer_idxs = [0, 2, 5, 7, 10, 12, 15, 17, 20, 22] + new_layer_idxs = [0, 1, 3, 4, 6, 7, 9, 10, 12, 13] + elif layer_num == 16: + layer_idxs = [0, 2, 5, 7, 10, 12, 14, 17, 19, 21, 24, 26, 28] + new_layer_idxs = [0, 1, 3, 4, 6, 7, 8, 10, 11, 12, 14, 15, 16] + elif layer_num == 19: + layer_idxs = [ + 0, 2, 5, 7, 10, 12, 14, 16, 19, 21, 23, 25, 28, 30, 32, 34 + ] + new_layer_idxs = [ + 0, 1, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19 + ] + else: + raise ValueError(f'Invalid number of layers: {layer_num}') + + layer_maps['conv'] = { + layer_idx: new_layer_idx + for layer_idx, new_layer_idx in zip(layer_idxs, new_layer_idxs) + } + + return layer_maps + + +def convert(src, dst, layer_num, with_bn=False): + """Convert keys in torchvision pretrained VGG models to mmcls style.""" + + # load pytorch model + assert os.path.isfile(src), f'no checkpoint found at {src}' + blobs = torch.load(src, map_location='cpu') + + # convert to pytorch style + state_dict = OrderedDict() + + layer_maps = get_layer_maps(layer_num, with_bn) + + prefix = 'backbone' + delimiter = '.' 
+ for key, weight in blobs.items(): + if 'features' in key: + module, layer_idx, weight_type = key.split(delimiter) + new_key = delimiter.join([prefix, key]) + layer_idx = int(layer_idx) + for layer_key, maps in layer_maps.items(): + if layer_idx in maps: + new_layer_idx = maps[layer_idx] + new_key = delimiter.join([ + prefix, 'features', + str(new_layer_idx), layer_key, weight_type + ]) + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + elif 'classifier' in key: + new_key = delimiter.join([prefix, key]) + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + else: + state_dict[key] = weight + + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src torchvision model path') + parser.add_argument('dst', help='save path') + parser.add_argument( + '--bn', action='store_true', help='whether original vgg has BN') + parser.add_argument( + '--layer_num', + type=int, + choices=[11, 13, 16, 19], + default=11, + help='number of VGG layers') + args = parser.parse_args() + convert(args.src, args.dst, layer_num=args.layer_num, with_bn=args.bn) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls2torchserve.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls2torchserve.py new file mode 100644 index 0000000000..b4ab14d8e8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls2torchserve.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser, Namespace +from pathlib import Path +from tempfile import TemporaryDirectory + +import mmcv + +try: + from model_archiver.model_packaging import package_model + from model_archiver.model_packaging_utils import ModelExportUtils +except ImportError: + package_model = None + + +def mmcls2torchserve( + config_file: str, + checkpoint_file: str, + output_folder: str, + model_name: str, + model_version: str = '1.0', + force: bool = False, +): + """Converts mmclassification model (config + checkpoint) to TorchServe + `.mar`. + + Args: + config_file: + In MMClassification config format. + The contents vary for each task repository. + checkpoint_file: + In MMClassification checkpoint format. + The contents vary for each task repository. + output_folder: + Folder where `{model_name}.mar` will be created. + The file created will be in TorchServe archive format. + model_name: + If not None, used for naming the `{model_name}.mar` file + that will be created under `output_folder`. + If None, `{Path(checkpoint_file).stem}` will be used. + model_version: + Model's version. + force: + If True, if there is an existing `{model_name}.mar` + file under `output_folder` it will be overwritten. 
+ """ + mmcv.mkdir_or_exist(output_folder) + + config = mmcv.Config.fromfile(config_file) + + with TemporaryDirectory() as tmpdir: + config.dump(f'{tmpdir}/config.py') + + args = Namespace( + **{ + 'model_file': f'{tmpdir}/config.py', + 'serialized_file': checkpoint_file, + 'handler': f'{Path(__file__).parent}/mmcls_handler.py', + 'model_name': model_name or Path(checkpoint_file).stem, + 'version': model_version, + 'export_path': output_folder, + 'force': force, + 'requirements_file': None, + 'extra_files': None, + 'runtime': 'python', + 'archive_format': 'default' + }) + manifest = ModelExportUtils.generate_manifest_json(args) + package_model(args, manifest) + + +def parse_args(): + parser = ArgumentParser( + description='Convert mmcls models to TorchServe `.mar` format.') + parser.add_argument('config', type=str, help='config file path') + parser.add_argument('checkpoint', type=str, help='checkpoint file path') + parser.add_argument( + '--output-folder', + type=str, + required=True, + help='Folder where `{model_name}.mar` will be created.') + parser.add_argument( + '--model-name', + type=str, + default=None, + help='If not None, used for naming the `{model_name}.mar`' + 'file that will be created under `output_folder`.' + 'If None, `{Path(checkpoint_file).stem}` will be used.') + parser.add_argument( + '--model-version', + type=str, + default='1.0', + help='Number used for versioning.') + parser.add_argument( + '-f', + '--force', + action='store_true', + help='overwrite the existing `{model_name}.mar`') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if package_model is None: + raise ImportError('`torch-model-archiver` is required.' + 'Try: pip install torch-model-archiver') + + mmcls2torchserve(args.config, args.checkpoint, args.output_folder, + args.model_name, args.model_version, args.force) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls_handler.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls_handler.py new file mode 100644 index 0000000000..68815e96de --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/mmcls_handler.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import base64 +import os + +import mmcv +import torch +from ts.torch_handler.base_handler import BaseHandler + +from mmcls.apis import inference_model, init_model + + +class MMclsHandler(BaseHandler): + + def initialize(self, context): + properties = context.system_properties + self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' + self.device = torch.device(self.map_location + ':' + + str(properties.get('gpu_id')) if torch.cuda. 
+ is_available() else self.map_location) + self.manifest = context.manifest + + model_dir = properties.get('model_dir') + serialized_file = self.manifest['model']['serializedFile'] + checkpoint = os.path.join(model_dir, serialized_file) + self.config_file = os.path.join(model_dir, 'config.py') + + self.model = init_model(self.config_file, checkpoint, self.device) + self.initialized = True + + def preprocess(self, data): + images = [] + + for row in data: + image = row.get('data') or row.get('body') + if isinstance(image, str): + image = base64.b64decode(image) + image = mmcv.imfrombytes(image) + images.append(image) + + return images + + def inference(self, data, *args, **kwargs): + results = [] + for image in data: + results.append(inference_model(self.model, image)) + return results + + def postprocess(self, data): + for result in data: + result['pred_label'] = int(result['pred_label']) + return data diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/onnx2tensorrt.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/onnx2tensorrt.py new file mode 100644 index 0000000000..489364e2d1 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/onnx2tensorrt.py @@ -0,0 +1,142 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +import numpy as np + + +def get_GiB(x: int): + """return x GiB.""" + return x * (1 << 30) + + +def onnx2tensorrt(onnx_file, + trt_file, + input_shape, + max_batch_size, + fp16_mode=False, + verify=False, + workspace_size=1): + """Create tensorrt engine from onnx model. + + Args: + onnx_file (str): Filename of the input ONNX model file. + trt_file (str): Filename of the output TensorRT engine file. + input_shape (list[int]): Input shape of the model. + eg [1, 3, 224, 224]. + max_batch_size (int): Max batch size of the model. + verify (bool, optional): Whether to verify the converted model. + Defaults to False. + workspace_size (int, optional): Maximum workspace of GPU. + Defaults to 1. 
+ """ + import onnx + from mmcv.tensorrt import TRTWraper, onnx2trt, save_trt_engine + + onnx_model = onnx.load(onnx_file) + # create trt engine and wrapper + assert max_batch_size >= 1 + max_shape = [max_batch_size] + list(input_shape[1:]) + opt_shape_dict = {'input': [input_shape, input_shape, max_shape]} + max_workspace_size = get_GiB(workspace_size) + trt_engine = onnx2trt( + onnx_model, + opt_shape_dict, + fp16_mode=fp16_mode, + max_workspace_size=max_workspace_size) + save_dir, _ = osp.split(trt_file) + if save_dir: + os.makedirs(save_dir, exist_ok=True) + save_trt_engine(trt_engine, trt_file) + print(f'Successfully created TensorRT engine: {trt_file}') + + if verify: + import torch + import onnxruntime as ort + + input_img = torch.randn(*input_shape) + input_img_cpu = input_img.detach().cpu().numpy() + input_img_cuda = input_img.cuda() + + # Get results from ONNXRuntime + session_options = ort.SessionOptions() + sess = ort.InferenceSession(onnx_file, session_options) + + # get input and output names + input_names = [_.name for _ in sess.get_inputs()] + output_names = [_.name for _ in sess.get_outputs()] + + onnx_outputs = sess.run(None, { + input_names[0]: input_img_cpu, + }) + + # Get results from TensorRT + trt_model = TRTWraper(trt_file, input_names, output_names) + with torch.no_grad(): + trt_outputs = trt_model({input_names[0]: input_img_cuda}) + trt_outputs = [ + trt_outputs[_].detach().cpu().numpy() for _ in output_names + ] + + # Compare results + np.testing.assert_allclose( + onnx_outputs[0], trt_outputs[0], rtol=1e-05, atol=1e-05) + print('The numerical values are the same ' + + 'between ONNXRuntime and TensorRT') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMClassification models from ONNX to TensorRT') + parser.add_argument('model', help='Filename of the input ONNX model') + parser.add_argument( + '--trt-file', + type=str, + default='tmp.trt', + help='Filename of the output TensorRT engine') + parser.add_argument( + '--verify', + action='store_true', + help='Verify the outputs of ONNXRuntime and TensorRT') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[224, 224], + help='Input size of the model') + parser.add_argument( + '--max-batch-size', + type=int, + default=1, + help='Maximum batch size of TensorRT model.') + parser.add_argument('--fp16', action='store_true', help='Enable fp16 mode') + parser.add_argument( + '--workspace-size', + type=int, + default=1, + help='Max workspace size of GPU in GiB') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + + args = parse_args() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = (1, 3) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + # Create TensorRT engine + onnx2tensorrt( + args.model, + args.trt_file, + input_shape, + args.max_batch_size, + fp16_mode=args.fp16, + verify=args.verify, + workspace_size=args.workspace_size) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2onnx.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2onnx.py new file mode 100644 index 0000000000..adb612d8f8 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2onnx.py @@ -0,0 +1,233 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +from functools import partial + +import mmcv +import numpy as np +import onnxruntime as rt +import torch +from mmcv.onnx import register_extra_symbolics +from mmcv.runner import load_checkpoint + +from mmcls.models import build_classifier + +torch.manual_seed(3) + + +def _demo_mm_inputs(input_shape, num_classes): + """Create a superset of inputs needed to run test or train batches. + + Args: + input_shape (tuple): + input batch dimensions + num_classes (int): + number of semantic classes + """ + (N, C, H, W) = input_shape + rng = np.random.RandomState(0) + imgs = rng.rand(*input_shape) + gt_labels = rng.randint( + low=0, high=num_classes, size=(N, 1)).astype(np.uint8) + mm_inputs = { + 'imgs': torch.FloatTensor(imgs).requires_grad_(True), + 'gt_labels': torch.LongTensor(gt_labels), + } + return mm_inputs + + +def pytorch2onnx(model, + input_shape, + opset_version=11, + dynamic_export=False, + show=False, + output_file='tmp.onnx', + do_simplify=False, + verify=False): + """Export Pytorch model to ONNX model and verify the outputs are same + between Pytorch and ONNX. + + Args: + model (nn.Module): Pytorch model we want to export. + input_shape (tuple): Use this input shape to construct + the corresponding dummy input and execute the model. + opset_version (int): The onnx op version. Default: 11. + show (bool): Whether print the computation graph. Default: False. + output_file (string): The path to where we store the output ONNX model. + Default: `tmp.onnx`. + verify (bool): Whether compare the outputs between Pytorch and ONNX. + Default: False. + """ + model.cpu().eval() + + if hasattr(model.head, 'num_classes'): + num_classes = model.head.num_classes + # Some backbones use `num_classes=-1` to disable top classifier. + elif getattr(model.backbone, 'num_classes', -1) > 0: + num_classes = model.backbone.num_classes + else: + raise AttributeError('Cannot find "num_classes" in both head and ' + 'backbone, please check the config file.') + + mm_inputs = _demo_mm_inputs(input_shape, num_classes) + + imgs = mm_inputs.pop('imgs') + img_list = [img[None, :] for img in imgs] + + # replace original forward function + origin_forward = model.forward + model.forward = partial(model.forward, img_metas={}, return_loss=False) + register_extra_symbolics(opset_version) + + # support dynamic shape export + if dynamic_export: + dynamic_axes = { + 'input': { + 0: 'batch', + 2: 'width', + 3: 'height' + }, + 'probs': { + 0: 'batch' + } + } + else: + dynamic_axes = {} + + with torch.no_grad(): + torch.onnx.export( + model, (img_list, ), + output_file, + input_names=['input'], + output_names=['probs'], + export_params=True, + keep_initializers_as_inputs=True, + dynamic_axes=dynamic_axes, + verbose=show, + opset_version=opset_version) + print(f'Successfully exported ONNX model: {output_file}') + model.forward = origin_forward + + if do_simplify: + from mmcv import digit_version + import onnxsim + import onnx + + min_required_version = '0.3.0' + assert digit_version(mmcv.__version__) >= digit_version( + min_required_version + ), f'Requires to install onnx-simplify>={min_required_version}' + + if dynamic_axes: + input_shape = (input_shape[0], input_shape[1], input_shape[2] * 2, + input_shape[3] * 2) + else: + input_shape = (input_shape[0], input_shape[1], input_shape[2], + input_shape[3]) + imgs = _demo_mm_inputs(input_shape, model.head.num_classes).pop('imgs') + input_dic = {'input': imgs.detach().cpu().numpy()} + input_shape_dic = {'input': list(input_shape)} + + model_opt, check_ok = 
onnxsim.simplify( + output_file, + input_shapes=input_shape_dic, + input_data=input_dic, + dynamic_input_shape=dynamic_export) + if check_ok: + onnx.save(model_opt, output_file) + print(f'Successfully simplified ONNX model: {output_file}') + else: + print('Failed to simplify ONNX model.') + if verify: + # check by onnx + import onnx + onnx_model = onnx.load(output_file) + onnx.checker.check_model(onnx_model) + + # test the dynamic model + if dynamic_export: + dynamic_test_inputs = _demo_mm_inputs( + (input_shape[0], input_shape[1], input_shape[2] * 2, + input_shape[3] * 2), model.head.num_classes) + imgs = dynamic_test_inputs.pop('imgs') + img_list = [img[None, :] for img in imgs] + + # check the numerical value + # get pytorch output + pytorch_result = model(img_list, img_metas={}, return_loss=False)[0] + + # get onnx output + input_all = [node.name for node in onnx_model.graph.input] + input_initializer = [ + node.name for node in onnx_model.graph.initializer + ] + net_feed_input = list(set(input_all) - set(input_initializer)) + assert (len(net_feed_input) == 1) + sess = rt.InferenceSession(output_file) + onnx_result = sess.run( + None, {net_feed_input[0]: img_list[0].detach().numpy()})[0] + if not np.allclose(pytorch_result, onnx_result): + raise ValueError( + 'The outputs are different between Pytorch and ONNX') + print('The outputs are same between Pytorch and ONNX') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Convert MMCls to ONNX') + parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file', default=None) + parser.add_argument('--show', action='store_true', help='show onnx graph') + parser.add_argument( + '--verify', action='store_true', help='verify the onnx model') + parser.add_argument('--output-file', type=str, default='tmp.onnx') + parser.add_argument('--opset-version', type=int, default=11) + parser.add_argument( + '--simplify', + action='store_true', + help='Whether to simplify onnx model.') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[224, 224], + help='input image size') + parser.add_argument( + '--dynamic-export', + action='store_true', + help='Whether to export ONNX with dynamic input shape. \ + Defaults to False.') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = ( + 1, + 3, + ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = mmcv.Config.fromfile(args.config) + cfg.model.pretrained = None + + # build the model and load checkpoint + classifier = build_classifier(cfg.model) + + if args.checkpoint: + load_checkpoint(classifier, args.checkpoint, map_location='cpu') + + # convert model to onnx file + pytorch2onnx( + classifier, + input_shape, + opset_version=args.opset_version, + show=args.show, + dynamic_export=args.dynamic_export, + output_file=args.output_file, + do_simplify=args.simplify, + verify=args.verify) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2torchscript.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2torchscript.py new file mode 100644 index 0000000000..f261b7c952 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/pytorch2torchscript.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
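pytorch2onnx.py above traces the classifier with a dummy input and, when --dynamic-export is set, marks the batch and spatial axes of the 'input'/'probs' tensors as dynamic. The snippet below shows the same export pattern on a tiny stand-in module; the module itself is a placeholder, not part of the patch, and only torch is required:

# Minimal dynamic-shape ONNX export in the same style as the tool above.
import torch
import torch.nn as nn

# Hypothetical stand-in for the built classifier.
model = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(3, 10))
model.eval()

dummy = torch.randn(1, 3, 224, 224)
dynamic_axes = {'input': {0: 'batch', 2: 'width', 3: 'height'},
                'probs': {0: 'batch'}}

with torch.no_grad():
    torch.onnx.export(
        model, dummy, 'tmp.onnx',
        input_names=['input'], output_names=['probs'],
        dynamic_axes=dynamic_axes, opset_version=11)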
+import argparse +import os +import os.path as osp +from functools import partial + +import mmcv +import numpy as np +import torch +from mmcv.runner import load_checkpoint +from torch import nn + +from mmcls.models import build_classifier + +torch.manual_seed(3) + + +def _demo_mm_inputs(input_shape: tuple, num_classes: int): + """Create a superset of inputs needed to run test or train batches. + + Args: + input_shape (tuple): + input batch dimensions + num_classes (int): + number of semantic classes + """ + (N, C, H, W) = input_shape + rng = np.random.RandomState(0) + imgs = rng.rand(*input_shape) + gt_labels = rng.randint( + low=0, high=num_classes, size=(N, 1)).astype(np.uint8) + mm_inputs = { + 'imgs': torch.FloatTensor(imgs).requires_grad_(False), + 'gt_labels': torch.LongTensor(gt_labels), + } + return mm_inputs + + +def pytorch2torchscript(model: nn.Module, input_shape: tuple, output_file: str, + verify: bool): + """Export Pytorch model to TorchScript model through torch.jit.trace and + verify the outputs are same between Pytorch and TorchScript. + + Args: + model (nn.Module): Pytorch model we want to export. + input_shape (tuple): Use this input shape to construct + the corresponding dummy input and execute the model. + show (bool): Whether print the computation graph. Default: False. + output_file (string): The path to where we store the output + TorchScript model. + verify (bool): Whether compare the outputs between Pytorch + and TorchScript through loading generated output_file. + """ + model.cpu().eval() + + num_classes = model.head.num_classes + mm_inputs = _demo_mm_inputs(input_shape, num_classes) + + imgs = mm_inputs.pop('imgs') + img_list = [img[None, :] for img in imgs] + + # replace original forward function + origin_forward = model.forward + model.forward = partial(model.forward, img_metas={}, return_loss=False) + + with torch.no_grad(): + trace_model = torch.jit.trace(model, img_list[0]) + save_dir, _ = osp.split(output_file) + if save_dir: + os.makedirs(save_dir, exist_ok=True) + trace_model.save(output_file) + print(f'Successfully exported TorchScript model: {output_file}') + model.forward = origin_forward + + if verify: + # load by torch.jit + jit_model = torch.jit.load(output_file) + + # check the numerical value + # get pytorch output + pytorch_result = model(img_list, img_metas={}, return_loss=False)[0] + + # get jit output + jit_result = jit_model(img_list[0])[0].detach().numpy() + if not np.allclose(pytorch_result, jit_result): + raise ValueError( + 'The outputs are different between Pytorch and TorchScript') + print('The outputs are same between Pytorch and TorchScript') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMCls to TorchScript') + parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file', type=str) + parser.add_argument( + '--verify', + action='store_true', + help='verify the TorchScript model', + default=False) + parser.add_argument('--output-file', type=str, default='tmp.pt') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[224, 224], + help='input image size') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = ( + 1, + 3, + ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = mmcv.Config.fromfile(args.config) + cfg.model.pretrained 
= None + + # build the model and load checkpoint + classifier = build_classifier(cfg.model) + + if args.checkpoint: + load_checkpoint(classifier, args.checkpoint, map_location='cpu') + + # convert model to TorchScript file + pytorch2torchscript( + classifier, + input_shape, + output_file=args.output_file, + verify=args.verify) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test.py new file mode 100644 index 0000000000..c13ef9a108 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import warnings + +import mmcv +import numpy as np +from mmcv import DictAction +from mmcv.parallel import MMDataParallel + +from mmcls.apis import single_gpu_test +from mmcls.core.export import ONNXRuntimeClassifier, TensorRTClassifier +from mmcls.datasets import build_dataloader, build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Test (and eval) an ONNX model using ONNXRuntime.') + parser.add_argument('config', help='model config file') + parser.add_argument('model', help='filename of the input ONNX model') + parser.add_argument( + '--backend', + help='Backend of the model.', + choices=['onnxruntime', 'tensorrt']) + parser.add_argument( + '--out', type=str, help='output result file in pickle format') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file.') + parser.add_argument( + '--metrics', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., ' + '"accuracy", "precision", "recall", "f1_score", "support" for single ' + 'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for ' + 'multi-label dataset') + parser.add_argument( + '--metric-options', + nargs='+', + action=DictAction, + default={}, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be parsed as a dict metric_options for dataset.evaluate()' + ' function.') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where painted images will be saved') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # build dataset and dataloader + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader( + dataset, + samples_per_gpu=cfg.data.samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + shuffle=False, + round_up=False) + + # build onnxruntime model and run inference. 
+ if args.backend == 'onnxruntime': + model = ONNXRuntimeClassifier( + args.model, class_names=dataset.CLASSES, device_id=0) + elif args.backend == 'tensorrt': + model = TensorRTClassifier( + args.model, class_names=dataset.CLASSES, device_id=0) + else: + print('Unknown backend: {}.'.format(args.model)) + exit() + + model = MMDataParallel(model, device_ids=[0]) + model.CLASSES = dataset.CLASSES + outputs = single_gpu_test(model, data_loader, args.show, args.show_dir) + + if args.metrics: + results = dataset.evaluate(outputs, args.metrics, args.metric_options) + for k, v in results.items(): + print(f'\n{k} : {v:.2f}') + else: + warnings.warn('Evaluation metrics are not specified.') + scores = np.vstack(outputs) + pred_score = np.max(scores, axis=1) + pred_label = np.argmax(scores, axis=1) + pred_class = [dataset.CLASSES[lb] for lb in pred_label] + results = { + 'pred_score': pred_score, + 'pred_label': pred_label, + 'pred_class': pred_class + } + if not args.out: + print('\nthe predicted result for the first element is ' + f'pred_score = {pred_score[0]:.2f}, ' + f'pred_label = {pred_label[0]} ' + f'and pred_class = {pred_class[0]}. ' + 'Specify --out to save all results to files.') + if args.out: + print(f'\nwriting results to {args.out}') + mmcv.dump(results, args.out) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test_torchserver.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test_torchserver.py new file mode 100644 index 0000000000..91ec9b8095 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/deployment/test_torchserver.py @@ -0,0 +1,44 @@ +from argparse import ArgumentParser + +import numpy as np +import requests + +from mmcls.apis import inference_model, init_model, show_result_pyplot + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('model_name', help='The model name in the server') + parser.add_argument( + '--inference-addr', + default='127.0.0.1:8080', + help='Address and port of the inference server') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + args = parser.parse_args() + return args + + +def main(args): + # Inference single image by native apis. + model = init_model(args.config, args.checkpoint, device=args.device) + model_result = inference_model(model, args.img) + show_result_pyplot(model, args.img, model_result, title='pytorch_result') + + # Inference single image by torchserve engine. 
+ url = 'http://' + args.inference_addr + '/predictions/' + args.model_name + with open(args.img, 'rb') as image: + response = requests.post(url, image) + server_result = response.json() + show_result_pyplot(model, args.img, server_result, title='server_result') + + assert np.allclose(model_result['pred_score'], server_result['pred_score']) + print('Test complete, the results of PyTorch and TorchServe are the same.') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_test.sh b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_test.sh new file mode 100644 index 0000000000..3c74ec6ecd --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_test.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +PORT=${PORT:-29500} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_train.sh b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_train.sh new file mode 100644 index 0000000000..5b43fffbf2 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/dist_train.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +PORT=${PORT:-29500} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/print_config.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/print_config.py new file mode 100644 index 0000000000..e44cda0623 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/print_config.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import warnings + +from mmcv import Config, DictAction + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file (deprecate), ' + 'change to --cfg-options instead.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # import modules from string list. 
+ if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + print(f'Config:\n{cfg.pretty_text}') + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/verify_dataset.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/verify_dataset.py new file mode 100644 index 0000000000..6114adb152 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/misc/verify_dataset.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import fcntl +import os +from pathlib import Path + +from mmcv import Config, DictAction, track_parallel_progress, track_progress + +from mmcls.datasets import PIPELINES, build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser(description='Verify Dataset') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--out-path', + type=str, + default='brokenfiles.log', + help='output path of all the broken files. If the specified path ' + 'already exists, delete the previous file ') + parser.add_argument( + '--phase', + default='train', + type=str, + choices=['train', 'test', 'val'], + help='phase of dataset to visualize, accept "train" "test" and "val".') + parser.add_argument( + '--num-process', type=int, default=1, help='number of process to use') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + assert args.out_path is not None + assert args.num_process > 0 + return args + + +class DatasetValidator(): + """the dataset tool class to check if all file are broken.""" + + def __init__(self, dataset_cfg, log_file_path, phase): + super(DatasetValidator, self).__init__() + # keep only LoadImageFromFile pipeline + assert dataset_cfg.data[phase].pipeline[0][ + 'type'] == 'LoadImageFromFile', 'This tool is only for dataset ' \ + 'that needs to load image from files.' 
+ self.pipeline = PIPELINES.build(dataset_cfg.data[phase].pipeline[0]) + dataset_cfg.data[phase].pipeline = [] + dataset = build_dataset(dataset_cfg.data[phase]) + + self.dataset = dataset + self.log_file_path = log_file_path + + def valid_idx(self, idx): + item = self.dataset[idx] + try: + item = self.pipeline(item) + except Exception: + with open(self.log_file_path, 'a') as f: + # add file lock to prevent multi-process writing errors + fcntl.flock(f.fileno(), fcntl.LOCK_EX) + filepath = os.path.join(item['img_prefix'], + item['img_info']['filename']) + f.write(filepath + '\n') + print(f'{filepath} cannot be read correctly, please check it.') + # Release files lock automatic using with + + def __len__(self): + return len(self.dataset) + + +def print_info(log_file_path): + """print some information and do extra action.""" + print() + with open(log_file_path, 'r') as f: + context = f.read().strip() + if context == '': + print('There is no broken file found.') + os.remove(log_file_path) + else: + num_file = len(context.split('\n')) + print(f'{num_file} broken files found, name list save in file:' + f'{log_file_path}') + print() + + +def main(): + # parse cfg and args + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # touch output file to save broken files list. + output_path = Path(args.out_path) + if not output_path.parent.exists(): + raise Exception('log_file parent directory not found.') + if output_path.exists(): + os.remove(output_path) + output_path.touch() + + # do valid + validator = DatasetValidator(cfg, output_path, args.phase) + + if args.num_process > 1: + # The default chunksize calcuation method of Pool.map + chunksize, extra = divmod(len(validator), args.num_process * 8) + if extra: + chunksize += 1 + + track_parallel_progress( + validator.valid_idx, + list(range(len(validator))), + args.num_process, + chunksize=chunksize, + keep_order=False) + else: + track_progress(validator.valid_idx, list(range(len(validator)))) + + print_info(output_path) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_test.sh b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_test.sh new file mode 100644 index 0000000000..6dd67e5744 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_test.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_train.sh b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_train.sh new file mode 100644 index 0000000000..b3feb3d9c7 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/slurm_train.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +WORK_DIR=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} 
+PY_ARGS=${@:5} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/test.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/test.py new file mode 100644 index 0000000000..1eafc20470 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/test.py @@ -0,0 +1,219 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import warnings +from numbers import Number + +import mmcv +import numpy as np +import torch +from mmcv import DictAction +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import get_dist_info, init_dist, load_checkpoint + +from mmcls.apis import multi_gpu_test, single_gpu_test +from mmcls.datasets import build_dataloader, build_dataset +from mmcls.models import build_classifier + +# TODO import `wrap_fp16_model` from mmcv and delete them from mmcls +try: + from mmcv.runner import wrap_fp16_model +except ImportError: + warnings.warn('wrap_fp16_model from mmcls will be deprecated.' + 'Please install mmcv>=1.1.4.') + from mmcls.core import wrap_fp16_model + + +def parse_args(): + parser = argparse.ArgumentParser(description='mmcls test model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', help='output result file') + out_options = ['class_scores', 'pred_score', 'pred_label', 'pred_class'] + parser.add_argument( + '--out-items', + nargs='+', + default=['all'], + choices=out_options + ['none', 'all'], + help='Besides metrics, what items will be included in the output ' + f'result file. You can choose some of ({", ".join(out_options)}), ' + 'or use "all" to include all above, or use "none" to disable all of ' + 'above. Defaults to output all.', + metavar='') + parser.add_argument( + '--metrics', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., ' + '"accuracy", "precision", "recall", "f1_score", "support" for single ' + 'label dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for ' + 'multi-label dataset') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where painted images will be saved') + parser.add_argument( + '--gpu-collect', + action='store_true', + help='whether to use gpu to collect results') + parser.add_argument('--tmpdir', help='tmp dir for writing some results') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file (deprecate), ' + 'change to --cfg-options instead.') + parser.add_argument( + '--metric-options', + nargs='+', + action=DictAction, + default={}, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be parsed as a dict metric_options for dataset.evaluate()' + ' function.') + parser.add_argument( + '--show-options', + nargs='+', + action=DictAction, + help='custom options for show_result. key-value pair in xxx=yyy.' + 'Check available options in `model.show_result`.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument( + '--device', + choices=['cpu', 'cuda'], + default='cuda', + help='device used for testing') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = mmcv.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + assert args.metrics or args.out, \ + 'Please specify at least one of output path and evaluation metrics.' + + # init distributed env first, since logger depends on the dist info. 
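+    # With '--launcher none' everything below runs in a single process; any
+    # other launcher initializes the distributed backend from cfg.dist_params
+    # via mmcv.runner.init_dist before the dataloader and model are built.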
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # build the dataloader + dataset = build_dataset(cfg.data.test) + # the extra round_up data will be removed during gpu/cpu collect + data_loader = build_dataloader( + dataset, + samples_per_gpu=cfg.data.samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False, + round_up=True) + + # build the model and load checkpoint + model = build_classifier(cfg.model) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') + + if 'CLASSES' in checkpoint.get('meta', {}): + CLASSES = checkpoint['meta']['CLASSES'] + else: + from mmcls.datasets import ImageNet + warnings.simplefilter('once') + warnings.warn('Class names are not saved in the checkpoint\'s ' + 'meta data, use imagenet by default.') + CLASSES = ImageNet.CLASSES + + if not distributed: + if args.device == 'cpu': + model = model.cpu() + else: + model = MMDataParallel(model, device_ids=[0]) + model.CLASSES = CLASSES + show_kwargs = {} if args.show_options is None else args.show_options + outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, + **show_kwargs) + else: + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False) + outputs = multi_gpu_test(model, data_loader, args.tmpdir, + args.gpu_collect) + + rank, _ = get_dist_info() + if rank == 0: + results = {} + if args.metrics: + eval_results = dataset.evaluate(outputs, args.metrics, + args.metric_options) + results.update(eval_results) + for k, v in eval_results.items(): + if isinstance(v, np.ndarray): + v = [round(out, 2) for out in v.tolist()] + elif isinstance(v, Number): + v = round(v, 2) + else: + raise ValueError(f'Unsupport metric type: {type(v)}') + print(f'\n{k} : {v}') + if args.out: + if 'none' not in args.out_items: + scores = np.vstack(outputs) + pred_score = np.max(scores, axis=1) + pred_label = np.argmax(scores, axis=1) + pred_class = [CLASSES[lb] for lb in pred_label] + res_items = { + 'class_scores': scores, + 'pred_score': pred_score, + 'pred_label': pred_label, + 'pred_class': pred_class + } + if 'all' in args.out_items: + results.update(res_items) + else: + for key in args.out_items: + results[key] = res_items[key] + print(f'\ndumping results to {args.out}') + mmcv.dump(results, args.out) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/train.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/train.py new file mode 100644 index 0000000000..790294bd27 --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/train.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
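+# A usage sketch (the config path is an assumption, substitute a real one):
+#   python tools/train.py configs/resnet/resnet50_b32x8_imagenet.py \
+#       --work-dir ./work_dirs/resnet50
+# torch_npu and transfer_to_npu are imported below so that CUDA-oriented
+# calls in mmcv/mmcls are transparently redirected to Ascend NPU devices.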
+import argparse +import copy +import os +import os.path as osp +import time +import warnings + +import mmcv +import torch +import torch_npu +from torch_npu.contrib import transfer_to_npu + +from mmcv import Config, DictAction +from mmcv.runner import get_dist_info, init_dist + +from mmcls import __version__ +from mmcls.apis import set_random_seed, train_model +from mmcls.datasets import build_dataset +from mmcls.models import build_classifier +from mmcls.utils import collect_env, get_root_logger + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a model') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume-from', help='the checkpoint file to resume from') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + group_gpus = parser.add_mutually_exclusive_group() + group_gpus.add_argument('--device', help='device used for training') + group_gpus.add_argument( + '--gpus', + type=int, + help='number of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='ids of gpus to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file (deprecate), ' + 'change to --cfg-options instead.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both ' + 'specified, --options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + if args.resume_from is not None: + cfg.resume_from = args.resume_from + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids + else: + cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) + + # init distributed env first, since logger depends on the dist info. + if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + _, world_size = get_dist_info() + cfg.gpu_ids = range(world_size) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # dump config + cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) + # init the logger before other steps + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) + dash_line = '-' * 60 + '\n' + logger.info('Environment info:\n' + dash_line + env_info + '\n' + + dash_line) + meta['env_info'] = env_info + + # log some basic info + logger.info(f'Distributed training: {distributed}') + logger.info(f'Config:\n{cfg.pretty_text}') + + # set random seeds + if args.seed is not None: + logger.info(f'Set random seed to {args.seed}, ' + f'deterministic: {args.deterministic}') + set_random_seed(args.seed, deterministic=args.deterministic) + cfg.seed = args.seed + meta['seed'] = args.seed + + model = build_classifier(cfg.model) + model.init_weights() + + datasets = [build_dataset(cfg.data.train)] + if len(cfg.workflow) == 2: + val_dataset = copy.deepcopy(cfg.data.val) + val_dataset.pipeline = cfg.data.train.pipeline + datasets.append(build_dataset(val_dataset)) + if cfg.checkpoint_config is not None: + # save mmcls version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + mmcls_version=__version__, + config=cfg.pretty_text, + 
CLASSES=datasets[0].CLASSES) + # add an attribute for visualization convenience + train_model( + model, + datasets, + cfg, + distributed=distributed, + validate=(not args.no_validate), + timestamp=timestamp, + # device='cpu' if args.device == 'cpu' else 'cuda', + device=args.device, + meta=meta) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/visualizations/vis_pipeline.py b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/visualizations/vis_pipeline.py new file mode 100644 index 0000000000..59a4338c3d --- /dev/null +++ b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/tools/visualizations/vis_pipeline.py @@ -0,0 +1,257 @@ +import argparse +import itertools +import os +import re +import sys +from pathlib import Path + +import mmcv +import numpy as np +from mmcv import Config, DictAction, ProgressBar + +from mmcls.core import visualization as vis +from mmcls.datasets.builder import build_dataset +from mmcls.datasets.pipelines import Compose + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Visualize a Dataset Pipeline') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--skip-type', + type=str, + nargs='*', + default=['ToTensor', 'Normalize', 'ImageToTensor', 'Collect'], + help='the pipelines to skip when visualizing') + parser.add_argument( + '--output-dir', + default='', + type=str, + help='folder to save output pictures, if not set, do not save.') + parser.add_argument( + '--phase', + default='train', + type=str, + choices=['train', 'test', 'val'], + help='phase of dataset to visualize, accept "train" "test" and "val".' + ' Default train.') + parser.add_argument( + '--number', + type=int, + default=sys.maxsize, + help='number of images selected to visualize, must bigger than 0. if ' + 'the number is bigger than length of dataset, show all the images in ' + 'dataset; default "sys.maxsize", show all images in dataset') + parser.add_argument( + '--mode', + default='concat', + type=str, + choices=['original', 'pipeline', 'concat'], + help='display mode; display original pictures or transformed pictures' + ' or comparison pictures. "original" means show images load from disk;' + ' "pipeline" means to show images after pipeline; "concat" means show ' + 'images stitched by "original" and "pipeline" images. Default concat.') + parser.add_argument( + '--show', + default=False, + action='store_true', + help='whether to display images in pop-up window. Default False.') + parser.add_argument( + '--adaptive', + default=False, + action='store_true', + help='whether to automatically adjust the visualization image size') + parser.add_argument( + '--min-edge-length', + default=200, + type=int, + help='the min edge length when visualizing images, used when ' + '"--adaptive" is true. Default 200.') + parser.add_argument( + '--max-edge-length', + default=1000, + type=int, + help='the max edge length when visualizing images, used when ' + '"--adaptive" is true. Default 1000.') + parser.add_argument( + '--bgr2rgb', + default=False, + action='store_true', + help='flip the color channel order of images') + parser.add_argument( + '--window-size', + default='12*7', + help='size of the window to display images, in format of "$W*$H".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
If the value to '
+        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
+        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
+        'Note that the quotation marks are necessary and that no white space '
+        'is allowed.')
+    parser.add_argument(
+        '--show-options',
+        nargs='+',
+        action=DictAction,
+        help='custom options for display. key-value pair in xxx=yyy. options '
+        'in `mmcls.core.visualization.ImshowInfosContextManager.put_img_infos`'
+    )
+    args = parser.parse_args()
+
+    assert args.number > 0, "'args.number' must be larger than zero."
+    if args.window_size != '':
+        assert re.match(r'\d+\*\d+', args.window_size), \
+            "'window-size' must be in the format 'W*H'."
+    if args.output_dir == '' and not args.show:
+        raise ValueError("if '--output-dir' and '--show' are not set, "
+                         'nothing will happen when the program is running.')
+
+    if args.show_options is None:
+        args.show_options = {}
+    return args
+
+
+def retrieve_data_cfg(config_path, skip_type, cfg_options, phase):
+    cfg = Config.fromfile(config_path)
+    if cfg_options is not None:
+        cfg.merge_from_dict(cfg_options)
+    # import modules from string list.
+    if cfg.get('custom_imports', None):
+        from mmcv.utils import import_modules_from_strings
+        import_modules_from_strings(**cfg['custom_imports'])
+    data_cfg = cfg.data[phase]
+    while 'dataset' in data_cfg:
+        data_cfg = data_cfg['dataset']
+    data_cfg['pipeline'] = [
+        x for x in data_cfg.pipeline if x['type'] not in skip_type
+    ]
+
+    return cfg
+
+
+def build_dataset_pipeline(cfg, phase):
+    """Build the dataset and pipeline from config.
+
+    If 'LoadImageFromFile' is the first step, it is separated from the rest
+    of the pipeline so that the original image can be visualized as well.
+    """
+    data_cfg = cfg.data[phase]
+    loadimage_pipeline = []
+    if len(data_cfg.pipeline
+           ) != 0 and data_cfg.pipeline[0]['type'] == 'LoadImageFromFile':
+        loadimage_pipeline.append(data_cfg.pipeline.pop(0))
+    origin_pipeline = data_cfg.pipeline
+    data_cfg.pipeline = loadimage_pipeline
+    dataset = build_dataset(data_cfg)
+    pipeline = Compose(origin_pipeline)
+
+    return dataset, pipeline
+
+
+def put_img(board, img, center):
+    """Put an image into a big board image at the anchor center."""
+    center_x, center_y = center
+    img_h, img_w, _ = img.shape
+    xmin, ymin = int(center_x - img_w // 2), int(center_y - img_h // 2)
+    board[ymin:ymin + img_h, xmin:xmin + img_w, :] = img
+    return board
+
+
+def concat(left_img, right_img):
+    """Concat two pictures into a single big picture; accepts two images
+    with different shapes."""
+    GAP = 10
+    left_h, left_w, _ = left_img.shape
+    right_h, right_w, _ = right_img.shape
+    # create a big board to contain images with shape (board_h, board_w*2+10)
+    board_h, board_w = max(left_h, right_h), max(left_w, right_w)
+    board = np.ones([board_h, 2 * board_w + GAP, 3], np.uint8) * 255
+
+    put_img(board, left_img, (int(board_w // 2), int(board_h // 2)))
+    put_img(board, right_img,
+            (int(board_w // 2) + board_w + GAP // 2, int(board_h // 2)))
+    return board
+
+
+def adaptive_size(mode, image, min_edge_length, max_edge_length):
+    """Rescale the image if it is too small to put text on, e.g. CIFAR."""
+    assert min_edge_length >= 0 and max_edge_length >= 0
+    assert max_edge_length >= min_edge_length
+
+    image_h, image_w, *_ = image.shape
+    image_w = image_w // 2 if mode == 'concat' else image_w
+
+    if image_h < min_edge_length or image_w < min_edge_length:
+        image = mmcv.imrescale(
+            image, min(min_edge_length / image_h, min_edge_length / image_w))
+    if image_h > max_edge_length or image_w > max_edge_length:
+        image =
mmcv.imrescale( + image, max(max_edge_length / image_h, max_edge_length / image_w)) + return image + + +def get_display_img(item, pipeline, mode, bgr2rgb): + """get image to display.""" + if bgr2rgb: + item['img'] = mmcv.bgr2rgb(item['img']) + src_image = item['img'].copy() + # get transformed picture + if mode in ['pipeline', 'concat']: + item = pipeline(item) + trans_image = item['img'] + trans_image = np.ascontiguousarray(trans_image, dtype=np.uint8) + + if mode == 'concat': + image = concat(src_image, trans_image) + elif mode == 'original': + image = src_image + elif mode == 'pipeline': + image = trans_image + return image + + +def main(): + args = parse_args() + wind_w, wind_h = args.window_size.split('*') + wind_w, wind_h = int(wind_w), int(wind_h) + cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options, + args.phase) + + dataset, pipeline = build_dataset_pipeline(cfg, args.phase) + CLASSES = dataset.CLASSES + display_number = min(args.number, len(dataset)) + progressBar = ProgressBar(display_number) + + with vis.ImshowInfosContextManager(fig_size=(wind_w, wind_h)) as manager: + for i, item in enumerate(itertools.islice(dataset, display_number)): + image = get_display_img(item, pipeline, args.mode, args.bgr2rgb) + if args.adaptive: + image = adaptive_size(args.mode, image, args.min_edge_length, + args.max_edge_length) + + # dist_path is None as default, means not save pictures + dist_path = None + if args.output_dir: + # some datasets do not have filename, such as cifar, use id + src_path = item.get('filename', '{}.jpg'.format(i)) + dist_path = os.path.join(args.output_dir, Path(src_path).name) + + infos = dict(label=CLASSES[item['gt_label']]) + + manager.put_img_infos( + image, + infos, + font_size=20, + out_file=dist_path, + show=args.show, + **args.show_options) + + progressBar.update() + + +if __name__ == '__main__': + main() -- Gitee From 86feb290bf9b1a8b8f853248747625a292382638 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=8E=E6=9C=A8=E6=9E=97?= <762129126@qq.com> Date: Fri, 7 Apr 2023 05:45:06 +0000 Subject: [PATCH 2/4] add PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 黎木林 <762129126@qq.com> --- PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data new file mode 100644 index 0000000000..e69de29bb2 -- Gitee From 76405ae422f29cf8124a31be0d2e1da198dfbc92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=8E=E6=9C=A8=E6=9E=97?= <762129126@qq.com> Date: Fri, 7 Apr 2023 05:45:45 +0000 Subject: [PATCH 3/4] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20PyTo?= =?UTF-8?q?rch/dev/cv/image=5Fclassification/Resnet50=5Ffor=5FPyTorch/data?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data deleted file mode 100644 index e69de29bb2..0000000000 -- Gitee From 92d47c5c3376467bbdedf20e85ab97866046b2c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=8E=E6=9C=A8=E6=9E=97?= <762129126@qq.com> Date: Fri, 7 Apr 2023 05:46:04 +0000 Subject: [PATCH 4/4] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20data?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dev/cv/image_classification/Resnet50_for_PyTorch/data/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data/.keep diff --git a/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data/.keep b/PyTorch/dev/cv/image_classification/Resnet50_for_PyTorch/data/.keep new file mode 100644 index 0000000000..e69de29bb2 -- Gitee
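
A minimal sketch of how the added tools are typically driven once a trained
checkpoint exists (the config and checkpoint paths below are assumptions, not
files introduced by these patches):

    # single-device evaluation with tools/test.py
    python tools/test.py configs/resnet/resnet50_b32x8_imagenet.py \
        work_dirs/resnet50/latest.pth --metrics accuracy

    # the same evaluation on a Slurm cluster via tools/slurm_test.sh;
    # arguments after the checkpoint are forwarded to tools/test.py
    GPUS=8 ./tools/slurm_test.sh my_partition r50_eval \
        configs/resnet/resnet50_b32x8_imagenet.py \
        work_dirs/resnet50/latest.pth --metrics accuracy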