diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/.gitignore b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..3d725761b024d751249c39109b359366c6931bc6
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/.gitignore
@@ -0,0 +1,2 @@
+.DS_Store
+.idea
\ No newline at end of file
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/LICENSE b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..09d493bf1fc257505c1336f3f87425568ab9da3c
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2017,
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/README.md b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..50495da093094e164bee2fc222cf1c83c1df0b68
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/README.md
@@ -0,0 +1,110 @@
+# FSRCNN
+
+This repository is an implementation of ["Accelerating the Super-Resolution Convolutional Neural Network"](https://arxiv.org/abs/1608.00367).
+
+
+
+## Differences from the original
+
+- Added zero-padding
+- Used the Adam optimizer instead of SGD
+
+## Requirements
+
+- PyTorch 1.0.0
+- Numpy 1.15.4
+- Pillow 5.4.1
+- h5py 2.8.0
+- tqdm 4.30.0
+
+## Train
+
+The 91-image and Set5 datasets, converted to HDF5, can be downloaded from the links below.
+
+| Dataset | Scale | Type | Link |
+|---------|-------|------|------|
+| 91-image | 2 | Train | [Download](https://www.dropbox.com/s/01z95js39kgw1qv/91-image_x2.h5?dl=0) |
+| 91-image | 3 | Train | [Download](https://www.dropbox.com/s/qx4swlt2j7u4twr/91-image_x3.h5?dl=0) |
+| 91-image | 4 | Train | [Download](https://www.dropbox.com/s/vobvi2nlymtvezb/91-image_x4.h5?dl=0) |
+| Set5 | 2 | Eval | [Download](https://www.dropbox.com/s/4kzqmtqzzo29l1x/Set5_x2.h5?dl=0) |
+| Set5 | 3 | Eval | [Download](https://www.dropbox.com/s/kyhbhyc5a0qcgnp/Set5_x3.h5?dl=0) |
+| Set5 | 4 | Eval | [Download](https://www.dropbox.com/s/ihtv1acd48cof14/Set5_x4.h5?dl=0) |
+
+Otherwise, you can use `prepare.py` to create a custom dataset.
+
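+A hypothetical invocation (the paths are placeholders; the flags are those defined in `prepare.py`):
+
+```bash
+python prepare.py --images-dir "BLAH_BLAH/91-image" \
+                  --output-path "BLAH_BLAH/91-image_x3.h5" \
+                  --scale 3 \
+                  --with-aug
+```
+
+To train, run:
+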
+```bash
+python train.py --train-file "BLAH_BLAH/91-image_x3.h5" \
+ --eval-file "BLAH_BLAH/Set5_x3.h5" \
+ --outputs-dir "BLAH_BLAH/outputs" \
+ --scale 3 \
+ --lr 1e-3 \
+ --batch-size 16 \
+ --num-epochs 20 \
+ --num-workers 8 \
+ --seed 123
+```
+
+## Test
+
+Pre-trained weights can be downloaded from the links below.
+
+| Model | Scale | Link |
+|-------|-------|------|
+| FSRCNN(56,12,4) | 2 | [Download](https://www.dropbox.com/s/1k3dker6g7hz76s/fsrcnn_x2.pth?dl=0) |
+| FSRCNN(56,12,4) | 3 | [Download](https://www.dropbox.com/s/pm1ed2nyboulz5z/fsrcnn_x3.pth?dl=0) |
+| FSRCNN(56,12,4) | 4 | [Download](https://www.dropbox.com/s/vsvumpopupdpmmu/fsrcnn_x4.pth?dl=0) |
+
+The results are saved in the same directory as the input image.
+
+```bash
+python test.py --weights-file "BLAH_BLAH/fsrcnn_x3.pth" \
+ --image-file "data/butterfly_GT.bmp" \
+ --scale 3
+```
+
+## Results
+
+PSNR was calculated on the Y channel.
+
+### Set5
+
+| Eval. Metric | Scale | Paper | Ours (91-image) |
+|-----------|-------|-------|-----------------|
+| PSNR | 2 | 36.94 | 37.12 |
+| PSNR | 3 | 33.06 | 33.22 |
+| PSNR | 4 | 30.55 | 30.50 |
+
+Sample comparisons:
+
+Original | BICUBIC x3 | FSRCNN x3 (34.66 dB)
+
+Original | BICUBIC x3 | FSRCNN x3 (28.55 dB)
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/datasets.py b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..90320652fea0c22394ca0c2d2eeab72bd5cfb798
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/datasets.py
@@ -0,0 +1,71 @@
+#
+# BSD 3-Clause License
+#
+# Copyright (c) 2017 xxxx
+# All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ============================================================================
+#
+import h5py
+import numpy as np
+from torch.utils.data import Dataset
+import torch.npu
+import os
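+# Select the NPU device from the NPU_CALCULATE_DEVICE environment variable
+# (set by the launch scripts under test/), falling back to device 0.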
+NPU_CALCULATE_DEVICE = 0
+if os.getenv('NPU_CALCULATE_DEVICE', '').isdigit():
+    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
+if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
+ torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
+
+
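+# TrainDataset reads fixed-size patches from the flat 'lr'/'hr' HDF5 datasets
+# written by prepare.py's train() path; EvalDataset reads whole images stored
+# one per index under the 'lr'/'hr' groups written by its eval() path. Opening
+# the file inside each call keeps the dataset picklable for DataLoader workers.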
+class TrainDataset(Dataset):
+ def __init__(self, h5_file):
+ super(TrainDataset, self).__init__()
+ self.h5_file = h5_file
+
+ def __getitem__(self, idx):
+ with h5py.File(self.h5_file, 'r') as f:
+ return np.expand_dims(f['lr'][idx] / 255., 0), np.expand_dims(f['hr'][idx] / 255., 0)
+
+ def __len__(self):
+ with h5py.File(self.h5_file, 'r') as f:
+ return len(f['lr'])
+
+
+class EvalDataset(Dataset):
+ def __init__(self, h5_file):
+ super(EvalDataset, self).__init__()
+ self.h5_file = h5_file
+
+ def __getitem__(self, idx):
+ with h5py.File(self.h5_file, 'r') as f:
+ return np.expand_dims(f['lr'][str(idx)][:, :] / 255., 0), np.expand_dims(f['hr'][str(idx)][:, :] / 255., 0)
+
+ def __len__(self):
+ with h5py.File(self.h5_file, 'r') as f:
+ return len(f['lr'])
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/models.py b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea45faa8b5f4d65e7ec3177c0c11255ccd94706d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/models.py
@@ -0,0 +1,80 @@
+#
+# BSD 3-Clause License
+#
+# Copyright (c) 2017 xxxx
+# All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ============================================================================
+#
+import math
+from torch import nn
+import torch.npu
+import os
+NPU_CALCULATE_DEVICE = 0
+if os.getenv('NPU_CALCULATE_DEVICE', '').isdigit():
+    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
+if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
+ torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
+
+
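+# FSRCNN(d, s, m) as described in the paper: a 5x5 feature-extraction conv with
+# d output maps, a 1x1 shrinking conv down to s maps, m 3x3 mapping convs, a
+# 1x1 expanding conv back to d maps, and a 9x9 transposed conv that upscales by
+# scale_factor (output_padding=scale_factor-1 makes the output exactly
+# scale_factor times the input size).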
+class FSRCNN(nn.Module):
+ def __init__(self, scale_factor, num_channels=1, d=56, s=12, m=4):
+ super(FSRCNN, self).__init__()
+ self.first_part = nn.Sequential(
+ nn.Conv2d(num_channels, d, kernel_size=5, padding=5//2),
+ nn.PReLU(d)
+ )
+ self.mid_part = [nn.Conv2d(d, s, kernel_size=1), nn.PReLU(s)]
+ for _ in range(m):
+ self.mid_part.extend([nn.Conv2d(s, s, kernel_size=3, padding=3//2), nn.PReLU(s)])
+ self.mid_part.extend([nn.Conv2d(s, d, kernel_size=1), nn.PReLU(d)])
+ self.mid_part = nn.Sequential(*self.mid_part)
+ self.last_part = nn.ConvTranspose2d(d, num_channels, kernel_size=9, stride=scale_factor, padding=9//2,
+ output_padding=scale_factor-1)
+
+ self._initialize_weights()
+
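+    # Conv weights use a zero-mean normal init with std sqrt(2 / (out_channels
+    # * kernel area)) and zero biases; the transposed conv uses a small normal
+    # init (std=0.001) with zero bias.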
+ def _initialize_weights(self):
+ for m in self.first_part:
+ if isinstance(m, nn.Conv2d):
+ nn.init.normal_(m.weight.data, mean=0.0, std=math.sqrt(2/(m.out_channels*m.weight.data[0][0].numel())))
+ nn.init.zeros_(m.bias.data)
+ for m in self.mid_part:
+ if isinstance(m, nn.Conv2d):
+ nn.init.normal_(m.weight.data, mean=0.0, std=math.sqrt(2/(m.out_channels*m.weight.data[0][0].numel())))
+ nn.init.zeros_(m.bias.data)
+ nn.init.normal_(self.last_part.weight.data, mean=0.0, std=0.001)
+ nn.init.zeros_(self.last_part.bias.data)
+
+ def forward(self, x):
+ x = self.first_part(x)
+ x = self.mid_part(x)
+ x = self.last_part(x)
+ return x
+
+
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/modelzoo_level.txt b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/modelzoo_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a420c6fe9a0c2afe0ff630176f7ac94a8d6bfe44
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:OK
+PrecisionStatus:OK
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/prepare.py b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/prepare.py
new file mode 100644
index 0000000000000000000000000000000000000000..f82ce841486b008ced3d729dcd00fa7bd69ba2bc
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/prepare.py
@@ -0,0 +1,125 @@
+#
+# BSD 3-Clause License
+#
+# Copyright (c) 2017 xxxx
+# All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ============================================================================
+#
+import argparse
+import glob
+import h5py
+import numpy as np
+import PIL.Image as pil_image
+from utils import calc_patch_size, convert_rgb_to_y
+import os
+NPU_CALCULATE_DEVICE = 0
+if os.getenv('NPU_CALCULATE_DEVICE', '').isdigit():
+    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
+
+
+@calc_patch_size
+def train(args):
+ h5_file = h5py.File(args.output_path, 'w')
+
+ lr_patches = []
+ hr_patches = []
+
+ for image_path in sorted(glob.glob('{}/*'.format(args.images_dir))):
+ hr = pil_image.open(image_path).convert('RGB')
+ hr_images = []
+
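+        # Optional augmentation: 5 downscale factors x 4 rotations give 20
+        # variants of each source image.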
+ if args.with_aug:
+ for s in [1.0, 0.9, 0.8, 0.7, 0.6]:
+ for r in [0, 90, 180, 270]:
+ tmp = hr.resize((int(hr.width * s), int(hr.height * s)), resample=pil_image.BICUBIC)
+ tmp = tmp.rotate(r, expand=True)
+ hr_images.append(tmp)
+ else:
+ hr_images.append(hr)
+
+ for hr in hr_images:
+ hr_width = (hr.width // args.scale) * args.scale
+ hr_height = (hr.height // args.scale) * args.scale
+ hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
+            lr = hr.resize((hr_width // args.scale, hr_height // args.scale), resample=pil_image.BICUBIC)
+ hr = np.array(hr).astype(np.float32)
+ lr = np.array(lr).astype(np.float32)
+ hr = convert_rgb_to_y(hr)
+ lr = convert_rgb_to_y(lr)
+
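+            # Slide over the LR image with stride = scale; each LR patch of
+            # patch_size x patch_size pairs with the HR patch covering the
+            # same region (patch_size * scale pixels per side).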
+ for i in range(0, lr.shape[0] - args.patch_size + 1, args.scale):
+ for j in range(0, lr.shape[1] - args.patch_size + 1, args.scale):
+ lr_patches.append(lr[i:i+args.patch_size, j:j+args.patch_size])
+ hr_patches.append(hr[i*args.scale:i*args.scale+args.patch_size*args.scale, j*args.scale:j*args.scale+args.patch_size*args.scale])
+
+ lr_patches = np.array(lr_patches)
+ hr_patches = np.array(hr_patches)
+
+ h5_file.create_dataset('lr', data=lr_patches)
+ h5_file.create_dataset('hr', data=hr_patches)
+
+ h5_file.close()
+
+
+def eval(args):
+ h5_file = h5py.File(args.output_path, 'w')
+
+ lr_group = h5_file.create_group('lr')
+ hr_group = h5_file.create_group('hr')
+
+ for i, image_path in enumerate(sorted(glob.glob('{}/*'.format(args.images_dir)))):
+ hr = pil_image.open(image_path).convert('RGB')
+ hr_width = (hr.width // args.scale) * args.scale
+ hr_height = (hr.height // args.scale) * args.scale
+ hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
+        lr = hr.resize((hr_width // args.scale, hr_height // args.scale), resample=pil_image.BICUBIC)
+ hr = np.array(hr).astype(np.float32)
+ lr = np.array(lr).astype(np.float32)
+ hr = convert_rgb_to_y(hr)
+ lr = convert_rgb_to_y(lr)
+
+ lr_group.create_dataset(str(i), data=lr)
+ hr_group.create_dataset(str(i), data=hr)
+
+ h5_file.close()
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--images-dir', type=str, required=True)
+ parser.add_argument('--output-path', type=str, required=True)
+ parser.add_argument('--scale', type=int, default=2)
+ parser.add_argument('--with-aug', action='store_true')
+ parser.add_argument('--eval', action='store_true')
+ args = parser.parse_args()
+
+ if not args.eval:
+ train(args)
+ else:
+ eval(args)
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/requirements.txt b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/test.py b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c1036fb74f5987d85ac8aa9b472d0231f6c8089
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/test.py
@@ -0,0 +1,98 @@
+#
+# BSD 3-Clause License
+#
+# Copyright (c) 2017 xxxx
+# All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ============================================================================
+#
+import argparse
+
+import torch
+import torch.backends.cudnn as cudnn
+import numpy as np
+import PIL.Image as pil_image
+
+from models import FSRCNN
+from utils import convert_ycbcr_to_rgb, preprocess, calc_psnr
+import torch.npu
+import os
+NPU_CALCULATE_DEVICE = 0
+if os.getenv('NPU_CALCULATE_DEVICE', '').isdigit():
+    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
+if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
+ torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights-file', type=str, required=True)
+ parser.add_argument('--image-file', type=str, required=True)
+ parser.add_argument('--scale', type=int, default=3)
+ args = parser.parse_args()
+
+ cudnn.benchmark = True
+ device = torch.device(f'npu:{NPU_CALCULATE_DEVICE}')
+
+ model = FSRCNN(scale_factor=args.scale).to(f'npu:{NPU_CALCULATE_DEVICE}')
+
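+    # Load the checkpoint tensor-by-tensor so that any key the model does not
+    # expect fails loudly instead of being silently ignored.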
+ state_dict = model.state_dict()
+ for n, p in torch.load(args.weights_file, map_location=f'npu:{NPU_CALCULATE_DEVICE}').items():
+ if n in state_dict.keys():
+ state_dict[n].copy_(p)
+ else:
+ raise KeyError(n)
+
+ model.eval()
+
+ image = pil_image.open(args.image_file).convert('RGB')
+
+ image_width = (image.width // args.scale) * args.scale
+ image_height = (image.height // args.scale) * args.scale
+
+ hr = image.resize((image_width, image_height), resample=pil_image.BICUBIC)
+ lr = hr.resize((hr.width // args.scale, hr.height // args.scale), resample=pil_image.BICUBIC)
+ bicubic = lr.resize((lr.width * args.scale, lr.height * args.scale), resample=pil_image.BICUBIC)
+    # Derive output names with os.path.splitext so dots elsewhere in the path
+    # are left untouched.
+    base, ext = os.path.splitext(args.image_file)
+    bicubic.save('{}_bicubic_x{}{}'.format(base, args.scale, ext))
+
+ lr, _ = preprocess(lr, device)
+ hr, _ = preprocess(hr, device)
+ _, ycbcr = preprocess(bicubic, device)
+
+ with torch.no_grad():
+ preds = model(lr).clamp(0.0, 1.0)
+
+ psnr = calc_psnr(hr, preds)
+ print('PSNR: {:.2f}'.format(psnr))
+
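+    # The network predicts only the Y (luma) channel; reuse the bicubically
+    # upscaled Cb/Cr channels and convert back to RGB for saving.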
+ preds = preds.mul(255.0).cpu().numpy().squeeze(0).squeeze(0)
+
+ output = np.array([preds, ycbcr[..., 1], ycbcr[..., 2]]).transpose([1, 2, 0])
+ output = np.clip(convert_ycbcr_to_rgb(output), 0.0, 255.0).astype(np.uint8)
+ output = pil_image.fromarray(output)
+    output.save('{}_fsrcnn_x{}{}'.format(base, args.scale, ext))
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/test/train_full_1p.sh b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/test/train_full_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..19e178e9d7e587649aa6fcc886c6788ffff13049
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/test/train_full_1p.sh
@@ -0,0 +1,195 @@
+#!/bin/bash
+
+# Current path; no need to modify
+cur_path=`pwd`
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID
+
+# Collective communication parameters; no need to modify
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+
+# Dataset path; keep empty, no need to modify
+data_path=""
+
+# Basic parameters; review and modify per model
+# Network name, same as the directory name
+Network="FSRCNN_ID2990_for_PyTorch"
+# Number of training epochs
+train_epochs=20
+# Training batch size
+batch_size=16
+# Training steps
+#train_steps=`expr 1281167 / ${batch_size}`
+# Learning rate
+learning_rate=0.495
+
+# TF2.X only; no need to modify
+#export NPU_LOOP_SIZE=${train_steps}
+
+# Precision/debug parameters; review precision_mode per model
+precision_mode="allow_mix_precision"
+# Fixed parameters; no need to modify below
+over_dump=False
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+autotune=False
+
+# Help message; no need to modify
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_full_1p.sh --data_path=<data_path>"
+    echo " "
+    echo "parameter explain:
+    --precision_mode         precision mode (allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              whether to enable overflow detection, default is False
+    --data_dump_flag         whether to dump data, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              whether to enable profiling for performance debugging, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+    "
+ exit 1
+fi
+
+# Argument parsing; no need to modify
+for para in $*
+do
+ if [[ $para == --precision_mode* ]];then
+ precision_mode=`echo ${para#*=}`
+ elif [[ $para == --over_dump* ]];then
+ over_dump=`echo ${para#*=}`
+ over_dump_path=${cur_path}/output/overflow_dump
+ mkdir -p ${over_dump_path}
+ elif [[ $para == --data_dump_flag* ]];then
+ data_dump_flag=`echo ${para#*=}`
+ data_dump_path=${cur_path}/output/data_dump
+ mkdir -p ${data_dump_path}
+ elif [[ $para == --data_dump_step* ]];then
+ data_dump_step=`echo ${para#*=}`
+ elif [[ $para == --profiling* ]];then
+ profiling=`echo ${para#*=}`
+ profiling_dump_path=${cur_path}/output/profiling
+ mkdir -p ${profiling_dump_path}
+ elif [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ fi
+done
+
+# Check that data_path was passed in; no need to modify
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+ exit 1
+fi
+
+# Training start time; no need to modify
+start_time=$(date +%s)
+
+# Enter the training script directory; review and modify per model
+cd $cur_path/../
+
+
+#sed -i "s|./data|$data_path|g" mmoe.py
+#sed -i "s|n_epochs = 80|n_epochs = 1|g" mmoe.py
+#sed -i "s|pass|break|g" main.py
+
+#python3 setup.py install
+#mkdir -p checkpoints
+#mkdir -p /root/.cache/torch/hub/checkpoints
+#cp $data_path/fcn_* /root/.cache/torch/hub/checkpoints
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; no need to modify
+ echo "Device ID: $ASCEND_DEVICE_ID"
+ export RANK_ID=$RANK_ID
+
+
+
+    # Create the DeviceID output directory; no need to modify
+ if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+ rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+ mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+ else
+ mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+ fi
+
+    # CPU core binding; remove for models that do not need it, adjust per machine otherwise
+ #cpucount=`lscpu | grep "CPU(s):" | head -n 1 | awk '{print $2}'`
+ #cpustep=`expr $cpucount / 8`
+ #echo "taskset c steps:" $cpustep
+ #let a=RANK_ID*$cpustep
+ #let b=RANK_ID+1
+ #let c=b*$cpustep-1
+
+    # Run the training script; the arguments below need no modification, review anything else per model
+ nohup python3 train.py --train-file "$data_path/91-image_x3.h5" \
+ --eval-file "$data_path/Set5_x3.h5" \
+ --outputs-dir "outputs" \
+ --scale 3 \
+ --lr 1e-3 \
+ --batch-size $batch_size \
+ --num-epochs $train_epochs \
+ --num-workers 8 \
+ --seed 123 > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+done
+wait
+
+# Restore parameters
+#sed -i "s|$data_path|./data|g" mmoe.py
+#sed -i "s|n_epochs = 1|n_epochs = 80|g" mmoe.py
+#sed -i "s|break|pass|g" main.py
+
+#conda deactivate
+# Training end time; no need to modify
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results; no need to modify
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and modify per model
+FPS=`grep "FPS" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F "FPS:" '{print $2}'|tail -n +2|awk '{sum+=$1} END {print"",sum/NR}'|sed s/[[:space:]]//g`
+#FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${time}'}'`
+
+
+# Print; no need to modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+#train_accuracy=`grep eval_accuracy $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|grep -v mlp_log|awk 'END {print $5}'| sed 's/,//g' |cut -c 1-5`
+train_accuracy=`grep "best epoch" $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk -F "psnr:" '{print $2}'|awk 'NR==1{max=$1;next}{max=max>$1?max:$1}END{print max}'`
+# Print; no need to modify
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Summary of stability and accuracy monitoring results
+# Training case information; no need to modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Collect performance data
+# Throughput; no need to modify
+ActualFPS=${FPS}
+# Training time per iteration (ms); no need to modify
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*1000/'${FPS}'}'`
+
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep "FPS" $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk -F "Loss:" '{print $2}'|awk -F "," '{print $1}' >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+# Loss of the last iteration; no need to modify
+ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information to ${CaseName}.log; no need to modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/test/train_performance_1p.sh b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/test/train_performance_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..054174c51153a02553f1312b84e1b77ab2347e43
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/test/train_performance_1p.sh
@@ -0,0 +1,194 @@
+#!/bin/bash
+
+# Current path; no need to modify
+cur_path=`pwd`
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID
+
+# Collective communication parameters; no need to modify
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+
+# Dataset path; keep empty, no need to modify
+data_path=""
+
+# Basic parameters; review and modify per model
+# Network name, same as the directory name
+Network="FSRCNN_ID2990_for_PyTorch"
+# Number of training epochs
+train_epochs=1
+# Training batch size
+batch_size=16
+# Training steps
+#train_steps=`expr 1281167 / ${batch_size}`
+# Learning rate
+learning_rate=0.495
+
+# TF2.X only; no need to modify
+#export NPU_LOOP_SIZE=${train_steps}
+
+# Precision/debug parameters; review precision_mode per model
+precision_mode="allow_mix_precision"
+# Fixed parameters; no need to modify below
+over_dump=False
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+autotune=False
+
+# Help message; no need to modify
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_performance_1p.sh --data_path=<data_path>"
+    echo " "
+    echo "parameter explain:
+    --precision_mode         precision mode (allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              whether to enable overflow detection, default is False
+    --data_dump_flag         whether to dump data, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              whether to enable profiling for performance debugging, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+    "
+ exit 1
+fi
+
+# Argument parsing; no need to modify
+for para in $*
+do
+ if [[ $para == --precision_mode* ]];then
+ precision_mode=`echo ${para#*=}`
+ elif [[ $para == --over_dump* ]];then
+ over_dump=`echo ${para#*=}`
+ over_dump_path=${cur_path}/output/overflow_dump
+ mkdir -p ${over_dump_path}
+ elif [[ $para == --data_dump_flag* ]];then
+ data_dump_flag=`echo ${para#*=}`
+ data_dump_path=${cur_path}/output/data_dump
+ mkdir -p ${data_dump_path}
+ elif [[ $para == --data_dump_step* ]];then
+ data_dump_step=`echo ${para#*=}`
+ elif [[ $para == --profiling* ]];then
+ profiling=`echo ${para#*=}`
+ profiling_dump_path=${cur_path}/output/profiling
+ mkdir -p ${profiling_dump_path}
+ elif [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ fi
+done
+
+# Check that data_path was passed in; no need to modify
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+ exit 1
+fi
+
+# Training start time; no need to modify
+start_time=$(date +%s)
+
+# Enter the training script directory; review and modify per model
+cd $cur_path/../
+
+
+#sed -i "s|./data|$data_path|g" mmoe.py
+#sed -i "s|n_epochs = 80|n_epochs = 1|g" mmoe.py
+#sed -i "s|pass|break|g" main.py
+
+#python3 setup.py install
+#mkdir -p checkpoints
+#mkdir -p /root/.cache/torch/hub/checkpoints
+#cp $data_path/fcn_* /root/.cache/torch/hub/checkpoints
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; no need to modify
+ echo "Device ID: $ASCEND_DEVICE_ID"
+ export RANK_ID=$RANK_ID
+
+
+
+    # Create the DeviceID output directory; no need to modify
+ if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+ rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+ mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+ else
+ mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+ fi
+
+    # CPU core binding; remove for models that do not need it, adjust per machine otherwise
+ #cpucount=`lscpu | grep "CPU(s):" | head -n 1 | awk '{print $2}'`
+ #cpustep=`expr $cpucount / 8`
+ #echo "taskset c steps:" $cpustep
+ #let a=RANK_ID*$cpustep
+ #let b=RANK_ID+1
+ #let c=b*$cpustep-1
+
+    # Run the training script; the arguments below need no modification, review anything else per model
+ nohup python3 train.py --train-file "$data_path/91-image_x3.h5" \
+ --eval-file "$data_path/Set5_x3.h5" \
+ --outputs-dir "outputs" \
+ --scale 3 \
+ --lr 1e-3 \
+ --batch-size $batch_size \
+ --num-epochs $train_epochs \
+ --num-workers 8 \
+ --seed 123 > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+done
+wait
+
+# Restore parameters
+#sed -i "s|$data_path|./data|g" mmoe.py
+#sed -i "s|n_epochs = 1|n_epochs = 80|g" mmoe.py
+#sed -i "s|break|pass|g" main.py
+
+#conda deactivate
+# Training end time; no need to modify
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results; no need to modify
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and modify per model
+FPS=`grep "FPS" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F "FPS:" '{print $2}'|tail -n +2|awk '{sum+=$1} END {print"",sum/NR}'|sed s/[[:space:]]//g`
+#FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${time}'}'`
+
+
+# Print; no need to modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+#train_accuracy=`grep eval_accuracy $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|grep -v mlp_log|awk 'END {print $5}'| sed 's/,//g' |cut -c 1-5`
+# Print; no need to modify
+#echo "Final Train Accuracy : ${train_accuracy}"
+#echo "E2E Training Duration sec : $e2e_time"
+
+# Summary of stability and accuracy monitoring results
+# Training case information; no need to modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data
+# Throughput; no need to modify
+ActualFPS=${FPS}
+# Training time per iteration (ms); no need to modify
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*1000/'${FPS}'}'`
+
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep "FPS" $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk -F "Loss:" '{print $2}'|awk -F "," '{print $1}' >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+# Loss of the last iteration; no need to modify
+ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information to ${CaseName}.log; no need to modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/train.py b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..47251240352c51c44c160f97b682364cbfed224f
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/train.py
@@ -0,0 +1,170 @@
+#
+# BSD 3-Clause License
+#
+# Copyright (c) 2017 xxxx
+# All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ============================================================================
+#
+import argparse
+import os
+import copy
+
+import torch
+from torch import nn
+import torch.optim as optim
+import torch.backends.cudnn as cudnn
+from torch.utils.data.dataloader import DataLoader
+from tqdm import tqdm
+import time
+from models import FSRCNN
+from datasets import TrainDataset, EvalDataset
+from utils import AverageMeter, calc_psnr
+import torch.npu
+import apex
+try:
+ from apex import amp
+except ImportError:
+    amp = None
+
+NPU_CALCULATE_DEVICE = 0
+if os.getenv('NPU_CALCULATE_DEVICE', '').isdigit():
+    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
+if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
+ torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--train-file', type=str, required=True)
+ parser.add_argument('--eval-file', type=str, required=True)
+ parser.add_argument('--outputs-dir', type=str, required=True)
+ parser.add_argument('--weights-file', type=str)
+ parser.add_argument('--scale', type=int, default=2)
+ parser.add_argument('--lr', type=float, default=1e-3)
+ parser.add_argument('--batch-size', type=int, default=16)
+ parser.add_argument('--num-epochs', type=int, default=20)
+ parser.add_argument('--num-workers', type=int, default=64)
+ parser.add_argument('--seed', type=int, default=123)
+ args = parser.parse_args()
+
+ args.outputs_dir = os.path.join(args.outputs_dir, 'x{}'.format(args.scale))
+
+ if not os.path.exists(args.outputs_dir):
+ os.makedirs(args.outputs_dir)
+
+ cudnn.benchmark = True
+ device = torch.device(f'npu:{NPU_CALCULATE_DEVICE}')
+
+ torch.manual_seed(args.seed)
+
+ model = FSRCNN(scale_factor=args.scale).to(f'npu:{NPU_CALCULATE_DEVICE}')
+ criterion = nn.MSELoss()
+
+ #optimizer = optim.Adam([
+ # {'params': model.first_part.parameters()},
+ # {'params': model.mid_part.parameters()},
+ # {'params': model.last_part.parameters(), 'lr': args.lr * 0.1}
+ #], lr=args.lr)
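+    # Apex's NPU-fused Adam replaces the stock optimizer above; the
+    # deconvolution (last_part) keeps a 10x smaller learning rate, matching
+    # the original configuration. amp O2 with a static loss scale of 128
+    # enables mixed precision on the NPU.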
+ optimizer = apex.optimizers.NpuFusedAdam([
+        {'params': list(model.first_part.parameters()) + list(model.mid_part.parameters())},
+ {'params': model.last_part.parameters(), 'lr': args.lr * 0.1}
+ ], lr=args.lr)
+ model, optimizer = amp.initialize(model, optimizer, opt_level='O2', loss_scale=128.0, combine_grad=True)
+
+ train_dataset = TrainDataset(args.train_file)
+ train_dataloader = DataLoader(dataset=train_dataset,
+ batch_size=args.batch_size,
+ shuffle=True,
+ num_workers=args.num_workers,
+ pin_memory=True)
+ eval_dataset = EvalDataset(args.eval_file)
+ eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)
+
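+    # Track the weights with the best eval PSNR across epochs; they are saved
+    # as best.pth after the final epoch.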
+ best_weights = copy.deepcopy(model.state_dict())
+ best_epoch = 0
+ best_psnr = 0.0
+ step = 0
+ for epoch in range(args.num_epochs):
+ model.train()
+ epoch_losses = AverageMeter()
+
+ with tqdm(total=(len(train_dataset) - len(train_dataset) % args.batch_size), ncols=80) as t:
+ t.set_description('epoch: {}/{}'.format(epoch, args.num_epochs - 1))
+
+ for data in train_dataloader:
+ start_time = time.time()
+ inputs, labels = data
+
+ inputs = inputs.to(f'npu:{NPU_CALCULATE_DEVICE}', non_blocking=True)
+ labels = labels.to(f'npu:{NPU_CALCULATE_DEVICE}', non_blocking=True)
+
+ preds = model(inputs)
+
+ loss = criterion(preds, labels)
+
+ epoch_losses.update(loss.detach(), len(inputs))
+
+ optimizer.zero_grad()
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
+ scaled_loss.backward()
+ #loss.backward()
+ optimizer.step()
+ step += 1
+ step_time = time.time() - start_time
+ FPS = args.batch_size / step_time
+                print("Epoch:{}, step:{}, Loss:{:.4f}, time/step(s):{:.4f}, FPS:{:.3f}".format(epoch, step, epoch_losses.avg, step_time, FPS))
+ t.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
+ t.update(len(inputs))
+
+ torch.save(model.state_dict(), os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))
+
+ model.eval()
+ epoch_psnr = AverageMeter()
+
+ for data in eval_dataloader:
+ inputs, labels = data
+
+ inputs = inputs.to(f'npu:{NPU_CALCULATE_DEVICE}')
+ labels = labels.to(f'npu:{NPU_CALCULATE_DEVICE}')
+
+ with torch.no_grad():
+ preds = model(inputs).clamp(0.0, 1.0)
+
+ epoch_psnr.update(calc_psnr(preds, labels), len(inputs))
+
+ print('eval psnr: {:.2f}'.format(epoch_psnr.avg))
+
+ if epoch_psnr.avg > best_psnr:
+ best_epoch = epoch
+ best_psnr = epoch_psnr.avg
+ best_weights = copy.deepcopy(model.state_dict())
+
+ print('best epoch: {}, psnr: {:.2f}'.format(best_epoch, best_psnr))
+ torch.save(best_weights, os.path.join(args.outputs_dir, 'best.pth'))
diff --git a/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/utils.py b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..123a5107f491fe0d71745e57839d78a3580d3c4d
--- /dev/null
+++ b/PyTorch/dev/cv/image_classification/FSRCNN_ID2990_for_PyTorch/utils.py
@@ -0,0 +1,118 @@
+#
+# BSD 3-Clause License
+#
+# Copyright (c) 2017 xxxx
+# All rights reserved.
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ============================================================================
+#
+import torch
+import numpy as np
+import torch.npu
+import os
+NPU_CALCULATE_DEVICE = 0
+if os.getenv('NPU_CALCULATE_DEVICE', '').isdigit():
+    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
+if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
+ torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
+
+
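+# Decorator used by prepare.py: injects the LR patch size for each scale, so
+# the corresponding HR patch (patch_size * scale) stays roughly constant
+# (20, 21, or 24 px per side).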
+def calc_patch_size(func):
+ def wrapper(args):
+ if args.scale == 2:
+ args.patch_size = 10
+ elif args.scale == 3:
+ args.patch_size = 7
+ elif args.scale == 4:
+ args.patch_size = 6
+ else:
+            raise ValueError('unsupported scale: {}'.format(args.scale))
+ return func(args)
+ return wrapper
+
+
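+# BT.601-style RGB/YCbCr conversions for 8-bit pixel values (studio swing:
+# Y in [16, 235], Cb and Cr in [16, 240]).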
+def convert_rgb_to_y(img, dim_order='hwc'):
+ if dim_order == 'hwc':
+ return 16. + (64.738 * img[..., 0] + 129.057 * img[..., 1] + 25.064 * img[..., 2]) / 256.
+ else:
+ return 16. + (64.738 * img[0] + 129.057 * img[1] + 25.064 * img[2]) / 256.
+
+
+def convert_rgb_to_ycbcr(img, dim_order='hwc'):
+ if dim_order == 'hwc':
+ y = 16. + (64.738 * img[..., 0] + 129.057 * img[..., 1] + 25.064 * img[..., 2]) / 256.
+ cb = 128. + (-37.945 * img[..., 0] - 74.494 * img[..., 1] + 112.439 * img[..., 2]) / 256.
+ cr = 128. + (112.439 * img[..., 0] - 94.154 * img[..., 1] - 18.285 * img[..., 2]) / 256.
+ else:
+ y = 16. + (64.738 * img[0] + 129.057 * img[1] + 25.064 * img[2]) / 256.
+ cb = 128. + (-37.945 * img[0] - 74.494 * img[1] + 112.439 * img[2]) / 256.
+ cr = 128. + (112.439 * img[0] - 94.154 * img[1] - 18.285 * img[2]) / 256.
+ return np.array([y, cb, cr]).transpose([1, 2, 0])
+
+
+def convert_ycbcr_to_rgb(img, dim_order='hwc'):
+ if dim_order == 'hwc':
+ r = 298.082 * img[..., 0] / 256. + 408.583 * img[..., 2] / 256. - 222.921
+ g = 298.082 * img[..., 0] / 256. - 100.291 * img[..., 1] / 256. - 208.120 * img[..., 2] / 256. + 135.576
+ b = 298.082 * img[..., 0] / 256. + 516.412 * img[..., 1] / 256. - 276.836
+ else:
+ r = 298.082 * img[0] / 256. + 408.583 * img[2] / 256. - 222.921
+ g = 298.082 * img[0] / 256. - 100.291 * img[1] / 256. - 208.120 * img[2] / 256. + 135.576
+ b = 298.082 * img[0] / 256. + 516.412 * img[1] / 256. - 276.836
+ return np.array([r, g, b]).transpose([1, 2, 0])
+
+
+def preprocess(img, device):
+ img = np.array(img).astype(np.float32)
+ ycbcr = convert_rgb_to_ycbcr(img)
+ x = ycbcr[..., 0]
+ x /= 255.
+ x = torch.from_numpy(x).to(f'npu:{NPU_CALCULATE_DEVICE}')
+ x = x.unsqueeze(0).unsqueeze(0)
+ return x, ycbcr
+
+
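+# PSNR = 10 * log10(MAX^2 / MSE); tensors here are scaled to [0, 1], so MAX = 1.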
+def calc_psnr(img1, img2):
+ return 10. * torch.log10(1. / torch.mean((img1 - img2) ** 2))
+
+
+class AverageMeter(object):
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.val = 0
+ self.avg = 0
+ self.sum = 0
+ self.count = 0
+
+ def update(self, val, n=1):
+ self.val = val
+ self.sum += val * n
+ self.count += n
+ self.avg = self.sum / self.count