From e656f312d64a99b0622f2817d434fd1d1f0c07b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9F=B4=E5=B0=9A=E5=B2=AD?= Date: Thu, 22 Sep 2022 01:22:51 +0800 Subject: [PATCH 1/2] add new model --- .../infer/convert/convert_Se_resnet50.sh | 27 + .../SE-ResNet-50/infer/data_to_bin.py | 68 ++ .../SE-ResNet-50/infer/docker_start_infer.sh | 40 ++ .../SE-ResNet-50/infer/mxbase/CMakeLists.txt | 51 ++ .../SE-ResNet-50/infer/mxbase/build.sh | 55 ++ .../infer/mxbase/src/Se_resnet50.cpp | 339 +++++++++ .../infer/mxbase/src/Se_resnet50.h | 75 ++ .../SE-ResNet-50/infer/mxbase/src/main.cpp | 52 ++ .../SE-ResNet-50/infer/read_bin.py | 17 + .../SE-ResNet-50/infer/sdk/acc.py | 155 ++++ .../SE-ResNet-50/infer/sdk/api/__init__.py | 0 .../SE-ResNet-50/infer/sdk/api/infer.py | 155 ++++ .../sdk/config/Se_resnet50_ms_test.pipeline | 28 + .../SE-ResNet-50/infer/sdk/config/config.py | 25 + .../SE-ResNet-50/infer/sdk/main.py | 229 ++++++ .../SE-ResNet-50/modelarts/train_start.py | 670 ++++++++++++++++++ .../classification/SE-ResNet-50/se_module.py | 66 +- .../cv/classification/SE-ResNet-50/senet.py | 598 ++++++++-------- 18 files changed, 2318 insertions(+), 332 deletions(-) create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/convert/convert_Se_resnet50.sh create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/data_to_bin.py create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/docker_start_infer.sh create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/CMakeLists.txt create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/build.sh create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/Se_resnet50.cpp create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/Se_resnet50.h create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/main.cpp create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/read_bin.py create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/acc.py create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/api/__init__.py create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/api/infer.py create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/config/Se_resnet50_ms_test.pipeline create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/config/config.py create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/main.py create mode 100644 PyTorch/contrib/cv/classification/SE-ResNet-50/modelarts/train_start.py diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/convert/convert_Se_resnet50.sh b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/convert/convert_Se_resnet50.sh new file mode 100644 index 0000000000..8c768ee292 --- /dev/null +++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/convert/convert_Se_resnet50.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================

+air_path=$1
+om_path=$2
+
+echo "Input AIR file path: ${air_path}"
+echo "Output OM file path: ${om_path}"
+
+atc --framework=5 --model="${air_path}" \
+    --output="${om_path}" \
+    --enable_small_channel=1 \
+    --soc_version=Ascend310 \
+    --op_select_implmode="high_precision"
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/data_to_bin.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/data_to_bin.py
new file mode 100644
index 0000000000..b8a88becf9
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/data_to_bin.py
@@ -0,0 +1,68 @@
+import argparse
+import os
+import warnings
+
+import torch
+import torch.utils.data
+import torchvision.transforms as transforms
+import torchvision.datasets as datasets
+
+warnings.filterwarnings('ignore')
+
+parser = argparse.ArgumentParser(description='Dump the ImageNet validation set to .bin files')
+parser.add_argument('--data', required=False, default="/mass_store/dataset/imagenet/",
+                    type=str, help='Path to the ImageNet dataset root.')
+parser.add_argument('--batch_size', required=False, default=1,
+                    type=int, help='Batch size (one sample per .bin file).')
+parser.add_argument('--workers', required=False, default=8,
+                    type=int, help='Number of data loading workers.')
+parser.add_argument('--model_path', required=False, default="checkpoint.pt",
+                    type=str, help='Path to the model checkpoint.')
+args = parser.parse_args()
+valdir = os.path.join(args.data, 'val')
+normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                 std=[0.229, 0.224, 0.225])
+val_loader = torch.utils.data.DataLoader(
+    datasets.ImageFolder(valdir, transforms.Compose([
+        transforms.Resize(256),
+        transforms.CenterCrop(224),
+        transforms.ToTensor(),
+        normalize,
+    ])),
+    batch_size=args.batch_size, shuffle=True,
+    num_workers=args.workers, pin_memory=False, drop_last=True)
+save_path = "/mass_store/dataset/cyj/SE-ResNet-50/val_final/"
+for i, (images, target) in enumerate(val_loader):
+    # Each sample is written as a raw float32 CHW tensor plus its int64 label.
+    images = images.numpy()
+    target = target.numpy()
+    images.tofile(save_path + "images/" + str(i) + ".bin")
+    target.tofile(save_path + "target/" + str(i) + ".bin")
+    print("t", i)
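A quick sanity check for the dump above (a sketch; the paths assume the hardcoded save_path and batch_size=1 used in data_to_bin.py):

```python
import numpy as np

# Read one dumped sample back; layout assumed from data_to_bin.py above.
img = np.fromfile("val_final/images/0.bin", dtype=np.float32).reshape(1, 3, 224, 224)
label = np.fromfile("val_final/target/0.bin", dtype=np.int64)
print(img.shape, img.min(), img.max(), label)
```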
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/docker_start_infer.sh b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/docker_start_infer.sh
new file mode 100644
index 0000000000..6de8de1898
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/docker_start_infer.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Copyright(C) 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker_image=$1
+model_dir=$2
+data_path=$3
+
+if [ -z "${docker_image}" ]; then
+    echo "usage: ./docker_start_infer.sh docker_image model_dir data_path"
+    exit 1
+fi
+
+if [ ! -d "${model_dir}" ]; then
+    echo "model_dir ${model_dir} does not exist"
+    exit 1
+fi
+
+docker run -it -u root \
+    --device=/dev/davinci0 \
+    --device=/dev/davinci_manager \
+    --device=/dev/devmm_svm \
+    --device=/dev/hisi_hdc \
+    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+    -v ${model_dir}:${model_dir} \
+    -v ${data_path}:${data_path} \
+    ${docker_image} \
+    /bin/bash
\ No newline at end of file
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/CMakeLists.txt b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000..c06cc9d516
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,51 @@
+cmake_minimum_required(VERSION 3.10.0)
+project(Se_resnet50)
+
+set(TARGET Se_resnet50)
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
+add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie)
+
+# Check environment variables
+if(NOT DEFINED ENV{ASCEND_HOME})
+    message(FATAL_ERROR "please define environment variable: ASCEND_HOME")
+endif()
+if(NOT DEFINED ENV{ASCEND_VERSION})
+    message(WARNING "please define environment variable: ASCEND_VERSION")
+endif()
+if(NOT DEFINED ENV{ARCH_PATTERN})
+    message(WARNING "please define environment variable: ARCH_PATTERN")
+endif()
+set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include)
+set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64)
+
+set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
+set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
+set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
+set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
+if(DEFINED ENV{MXSDK_OPENSOURCE_DIR})
+    set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
+else()
+    set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource)
+endif()
+
+include_directories(${ACL_INC_DIR})
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+
+include_directories(${MXBASE_INC})
+include_directories(${MXBASE_POST_PROCESS_DIR})
+
+link_directories(${ACL_LIB_DIR})
+link_directories(${OPENSOURCE_DIR}/lib)
+link_directories(${MXBASE_LIB_DIR})
+link_directories(${MXBASE_POST_LIB_DIR})
+
+add_executable(${TARGET} src/main.cpp src/Se_resnet50.cpp)
+target_link_libraries(${TARGET} glog cpprest mxbase opencv_world stdc++fs)
+
+install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/build.sh b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/build.sh
new file mode 100644
index 0000000000..e23d258b6d
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/build.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+path_cur=$(dirname $0)
+
+function check_env()
+{
+    # set ASCEND_VERSION to ascend-toolkit/latest when it was not specified by user
+    if [ ! "${ASCEND_VERSION}" ]; then
+        export ASCEND_VERSION=ascend-toolkit/latest
+        echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
+    else
+        echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
+    fi
+
+    if [ ! "${ARCH_PATTERN}" ]; then
+        # set ARCH_PATTERN to ./ when it was not specified by user
+        export ARCH_PATTERN=./
+        echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
+    else
+        echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
+    fi
+}
+
+function build_se_resnet50()
+{
+    cd $path_cur
+    rm -rf build
+    mkdir -p build
+    cd build
+    cmake ..
+    make
+    ret=$?
+    if [ ${ret} -ne 0 ]; then
+        echo "Failed to build Se_resnet50."
+        exit ${ret}
+    fi
+    make install
+}
+
+check_env
+build_se_resnet50
\ No newline at end of file
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/Se_resnet50.cpp b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/Se_resnet50.cpp
new file mode 100644
index 0000000000..56803c865a
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/Se_resnet50.cpp
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* mxBase inference implementation for SE-ResNet-50 */
+#include "Se_resnet50.h"
+
+#include <dirent.h>
+#include <sys/stat.h>
+#include <algorithm>
+#include <chrono>
+
+#include <fstream>
+#include <sstream>
+#include <vector>
+
+#include "acl/acl.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+#include "MxBase/CV/ObjectDetection/Nms/Nms.h"
+
+namespace {
+const uint32_t YUV_BYTE_NU = 3;
+const uint32_t YUV_BYTE_DE = 2;
+
+const uint32_t MODEL_HEIGHT = 224;
+const uint32_t MODEL_WIDTH = 224;
+}  // namespace
+
+void PrintTensorShape(const std::vector<MxBase::TensorDesc> &tensorDescVec,
+                      const std::string &tensorName) {
+    LogInfo << "The shape of " << tensorName << " is as follows:";
+    for (size_t i = 0; i < tensorDescVec.size(); ++i) {
+        LogInfo << "  Tensor " << i << ":";
+        for (size_t j = 0; j < tensorDescVec[i].tensorDims.size(); ++j) {
+            LogInfo << "   dim: " << j << ": " << tensorDescVec[i].tensorDims[j];
+        }
+    }
+}
+
+APP_ERROR Se_resnet50::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
+    ret = dvppWrapper_->Init();
+    if (ret != APP_ERR_OK) {
+        LogError << "DvppWrapper init failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    PrintTensorShape(modelDesc_.inputTensors, "Model Input Tensors");
+    PrintTensorShape(modelDesc_.outputTensors, "Model Output Tensors");
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR Se_resnet50::DeInit() {
+    dvppWrapper_->DeInit();
+    model_->DeInit();
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+    return APP_ERR_OK;
+}
+
+APP_ERROR Se_resnet50::Inference(const std::vector<MxBase::TensorBase> &inputs,
+                                 std::vector<MxBase::TensorBase> *outputs) {
+    auto dtypes = model_->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i],
+                                  MxBase::MemoryData::MemoryType::MEMORY_DEVICE,
+                                  deviceId_);
+        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs->push_back(tensor);
+    }
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
+    auto startTime = std::chrono::high_resolution_clock::now();
+    APP_ERROR ret = model_->ModelInference(inputs, *outputs, dynamicInfo);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();  // save time
+    inferCostTimeMilliSec += costMs;
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+APP_ERROR Se_resnet50::Process(const std::string &imgPath,
+                               const std::string &resultPath,
+                               const std::string &dataset_name) {
+    ImageShape imageShape{};
+    ImageShape resizedImageShape{};
+    std::vector<std::string> dirs;
+    std::string data_set;
+    if (dataset_name == "TUM") {
+        data_set = imgPath + "/TUM/rgbd_dataset_freiburg2_desk_with_person";
+        dirs.emplace_back("TUM");
+    } else if (dataset_name == "Kitti") {
+        data_set = imgPath + "/Kitti_raw_data/";
+        dirs = GetAlldir(data_set, dataset_name);
+    } else if (dataset_name == "Sintel") {
+        data_set = imgPath + "/Sintel/final_left/";
+        dirs = GetAlldir(data_set, dataset_name);
+    } else {
+        data_set = imgPath;
+        dirs = GetAlldir(data_set, dataset_name);
+        std::cout << "other dataset start" << std::endl;
+    }
+    for (const auto &dir : dirs) {
+        std::vector<std::string> images;
+        if (dataset_name == "TUM")
+            images = GetAlldir(data_set, "TUM");
+        else
+            images = GetAllFiles(data_set + dir);
+        for (const auto &image_file : images) {
+            // Each input file holds one float32 CHW tensor of shape 1 x 3 x 224 x 224.
+            float fnum[3 * 224 * 224] = {0};
+            std::ifstream in(image_file, std::ios::in | std::ios::binary);
+            LogInfo << imgPath;
+            in.read(reinterpret_cast<char *>(&fnum), sizeof(fnum));
+
+            MxBase::TensorBase tensorBase;
+            const uint32_t dataSize = 1 * 3 * 224 * 224 * 4;
+            MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
+            MxBase::MemoryData memoryDataSrc(reinterpret_cast<void *>(fnum), dataSize,
+                                             MxBase::MemoryData::MEMORY_HOST_MALLOC);
+            APP_ERROR ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+
+            if (ret != APP_ERR_OK) {
+                LogError << GetError(ret) << "Memory malloc failed.";
+                return ret;
+            }
+            std::vector<uint32_t> shape = {static_cast<uint32_t>(1), static_cast<uint32_t>(3),
+                                           static_cast<uint32_t>(224), static_cast<uint32_t>(224)};
+            tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_FLOAT32);
+
+            std::vector<MxBase::TensorBase> inputs = {};
+            std::vector<MxBase::TensorBase> outputs = {};
+            inputs.push_back(tensorBase);
+
+            std::cout << "inputs shape is " << inputs[0].GetShape()[0] << " "
+                      << inputs[0].GetShape()[1] << " " << inputs[0].GetShape()[2]
+                      << " " << inputs[0].GetShape()[3] << std::endl;
+
+            auto startTime = std::chrono::high_resolution_clock::now();
+            ret = Inference(inputs, &outputs);
+            auto endTime = std::chrono::high_resolution_clock::now();
+            double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();  // save time
+            inferCostTimeMilliSec += costMs;
+            if (ret != APP_ERR_OK) {
+                LogError << "Inference failed, ret=" << ret << ".";
+                return ret;
+            }
+            WriteResult(image_file, &outputs, dataset_name, dir);
+            std::cout << "time is " << costMs << std::endl;
+        }
+    }
+    return APP_ERR_OK;
+}
+
+std::vector<std::string> GetAlldir(const std::string &dir_name,
+                                   const std::string &data_name) {
+    std::vector<std::string> res;
+    if (data_name == "TUM") {
+        std::string txt_name = dir_name + "/associate.txt";
+        std::ifstream infile;
+        infile.open(txt_name.data());
+        std::string s;
+        while (getline(infile, s)) {
+            std::stringstream input(s);
+            std::string result;
+            input >> result;
+            res.emplace_back(dir_name + "/" + result);
+        }
+        return res;
+    }
+    struct dirent *filename;
+    DIR *dir = OpenDir(dir_name);
+    if (dir == nullptr) {
+        return {};
+    }
+
+    if (data_name == "Kitti") {
+        while ((filename = readdir(dir)) != nullptr) {
+            std::string d_name = std::string(filename->d_name);
+            // skip "." and ".." and anything that is not a directory
+            if (d_name == "." || d_name == ".." || filename->d_type != DT_DIR)
+                continue;
+            res.emplace_back(d_name + "/image");
+            std::cout << "image_file name is " << d_name << std::endl;
+        }
+    } else {
+        while ((filename = readdir(dir)) != nullptr) {
+            std::string d_name = std::string(filename->d_name);
+            // skip "." and ".." and anything that is not a directory
+            if (d_name == "." || d_name == ".." || filename->d_type != DT_DIR)
+                continue;
+            res.emplace_back(d_name);
+        }
+    }
+
+    return res;
+}
+
+std::vector<std::string> GetAllFiles(std::string dirName) {
+    struct dirent *filename;
+    DIR *dir = OpenDir(dirName);
+    std::cout << dirName << std::endl;
+    if (dir == nullptr) {
+        return {};
+    }
+    std::vector<std::string> res;
+    while ((filename = readdir(dir)) != nullptr) {
+        std::string dName = std::string(filename->d_name);
+        if (dName == "." || dName == ".." || filename->d_type != DT_REG) {
+            continue;
+        }
+        res.emplace_back(std::string(dirName) + "/" + filename->d_name);
+    }
+    std::sort(res.begin(), res.end());
+    for (auto &f : res) {
+        std::cout << "image file: " << f << std::endl;
+    }
+    return res;
+}
+
+std::string RealPath(std::string path) {
+    char realPathMem[PATH_MAX] = {0};
+    char *realPathRet = nullptr;
+    realPathRet = realpath(path.data(), realPathMem);
+    if (realPathRet == nullptr) {
+        std::cout << "File: " << path << " does not exist.";
+        return "";
+    }
+
+    std::string realPath(realPathMem);
+    std::cout << path << " realpath is: " << realPath << std::endl;
+    return realPath;
+}
+
+DIR *OpenDir(std::string dirName) {
+    if (dirName.empty()) {
+        std::cout << " dirName is null ! " << std::endl;
+        return nullptr;
+    }
+    std::string realPath = RealPath(dirName);
+    struct stat s;
+    lstat(realPath.c_str(), &s);
+    if (!S_ISDIR(s.st_mode)) {
+        std::cout << "dirName is not a valid directory !" << std::endl;
+        return nullptr;
+    }
+    DIR *dir = opendir(realPath.c_str());
+    if (dir == nullptr) {
+        std::cout << "Can not open dir " << dirName << std::endl;
+        return nullptr;
+    }
+    std::cout << "Successfully opened the dir " << dirName << std::endl;
+    return dir;
+}
+
+APP_ERROR Se_resnet50::WriteResult(const std::string &imageFile,
+                                   std::vector<MxBase::TensorBase> *outputs,
+                                   const std::string &dataset_name,
+                                   const std::string &seq) {
+    std::string homePath;
+    if (dataset_name == "Kitti") {
+        int pos = seq.find('/');
+        std::string seq1(seq, 0, pos);
+        homePath = "./final_result_Files/" + dataset_name + "/" + seq1;
+        std::cout << "1" << std::endl;
+    } else {
+        homePath = "./final_result_Files/" + dataset_name + "/" + seq;
+    }
+    std::string path1 = "mkdir -p " + homePath;
+    system(path1.c_str());
+    std::cout << "homePath is " << homePath << std::endl;
+    for (size_t i = 0; i < (*outputs).size(); ++i) {
+        size_t outputSize;
+        APP_ERROR ret = (*outputs)[i].ToHost();
+        if (ret != APP_ERR_OK) {
+            LogError << GetError(ret) << "tohost fail.";
+            return ret;
+        }
+        void *netOutput = (*outputs)[i].GetBuffer();
+
+        std::vector<uint32_t> out_shape = (*outputs)[i].GetShape();
+        LogDebug << "shape is " << out_shape[0] << " " << out_shape[1] << " "
+                 << out_shape[2] << " " << out_shape[3] << std::endl;
+        outputSize = (*outputs)[i].GetByteSize();
+        std::cout << "outputsize is " << outputSize << std::endl;
+        int pos = imageFile.rfind('/');
+        std::string fileName(imageFile, pos + 1);
+        fileName.replace(fileName.rfind('.'), fileName.size() - fileName.rfind('.'), '_' + std::to_string(i) + ".bin");
+        std::string outFileName = homePath + "/" + fileName;
+        std::cout << "output file is " << outFileName << std::endl;
+        FILE *outputFile = fopen(outFileName.c_str(), "wb");
+        auto count1 = fwrite(netOutput, sizeof(char), outputSize, outputFile);
+        std::cout << "count is " << count1 << " " << sizeof(char) << std::endl;
+        fclose(outputFile);
+        outputFile = nullptr;
+    }
+
+    LogDebug << "Se_resnet50 write results succeeded.";
+    return APP_ERR_OK;
+}
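Process() above reads each input file as exactly one float32 1x3x224x224 tensor and never validates the file size itself. A small pre-flight check along these lines can catch malformed dumps early (a sketch; `check_bins` and `bin_dir` are illustrative names, with `bin_dir` standing for whatever directory is passed as img_path):

```python
import os

EXPECTED_BYTES = 1 * 3 * 224 * 224 * 4  # float32 CHW tensor, matching dataSize above


def check_bins(bin_dir):
    """Report any .bin file whose size does not match the model input layout."""
    for name in sorted(os.listdir(bin_dir)):
        path = os.path.join(bin_dir, name)
        if name.endswith(".bin") and os.path.getsize(path) != EXPECTED_BYTES:
            print(f"{name}: {os.path.getsize(path)} bytes, expected {EXPECTED_BYTES}")
```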
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/Se_resnet50.h b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/Se_resnet50.h
new file mode 100644
index 0000000000..f157462cc4
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/Se_resnet50.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef SE_RESNET50_H
+#define SE_RESNET50_H
+
+#include <dirent.h>
+#include <sys/stat.h>
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <opencv2/opencv.hpp>
+#include "MxBase/CV/Core/DataType.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ErrorCode/ErrorCode.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/PostProcessBases/ObjectPostProcessBase.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+
+struct InitParam {
+    uint32_t deviceId;
+    std::string labelPath;
+    std::string dataset_name;
+
+    bool checkTensor;
+    std::string modelPath;
+};
+
+struct ImageShape {
+    uint32_t width;
+    uint32_t height;
+};
+
+std::vector<std::string> GetAllFiles(std::string dirName);
+std::vector<std::string> GetAlldir(const std::string &dir_name, const std::string &data_name);
+std::string RealPath(std::string path);
+DIR *OpenDir(std::string dirName);
+
+class Se_resnet50 {
+ public:
+    APP_ERROR Init(const InitParam &initParam);
+    APP_ERROR DeInit();
+    APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> *outputs);
+    APP_ERROR Process(const std::string &imgPath, const std::string &resultPath, const std::string &dataset_name);
+    APP_ERROR WriteResult(const std::string &imageFile, std::vector<MxBase::TensorBase> *outputs,
+                          const std::string &dataset_name, const std::string &seq);
+ private:
+    std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
+    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+
+    MxBase::ModelDesc modelDesc_;
+    uint32_t deviceId_ = 0;
+    double inferCostTimeMilliSec = 0.0;
+};
+
+#endif  // SE_RESNET50_H
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/main.cpp b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000..1ba2f29971
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/mxbase/src/main.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "MxBase/Log/Log.h"
+#include "Se_resnet50.h"
+
+namespace {
+const uint32_t DEVICE_ID = 0;
+const char RESULT_PATH[] = "../data/";
+}  // namespace
+
+int main(int argc, char *argv[]) {
+    if (argc <= 3) {
+        LogWarn << "Please input the required arguments, such as './Se_resnet50 [om_file_path] [img_path] [dataset_name]'.";
+        return APP_ERR_OK;
+    }
+    InitParam initParam = {};
+    initParam.deviceId = DEVICE_ID;
+
+    initParam.checkTensor = true;
+
+    initParam.modelPath = argv[1];
+    auto inferSeResnet50 = std::make_shared<Se_resnet50>();
+    APP_ERROR ret = inferSeResnet50->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "Se_resnet50 init failed, ret=" << ret << ".";
+        return ret;
+    }
+    std::string imgPath = argv[2];
+    std::string dataset_name = argv[3];
+    ret = inferSeResnet50->Process(imgPath, RESULT_PATH, dataset_name);
+    if (ret != APP_ERR_OK) {
+        LogError << "Se_resnet50 process failed, ret=" << ret << ".";
+        inferSeResnet50->DeInit();
+        return ret;
+    }
+    inferSeResnet50->DeInit();
+    return APP_ERR_OK;
+}
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/read_bin.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/read_bin.py
new file mode 100644
index 0000000000..51758f101c
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/read_bin.py
@@ -0,0 +1,17 @@
+import os
+
+import numpy as np
+import torch
+
+result_path = "../data_and_result/result/data/infer_result/images/images/"
+result_file = os.listdir(result_path)
+maxk = 5
+res = []
+for i in range(len(result_file)):
+    # Each result file holds the 1000-class logits for one sample.
+    result_name = result_path + str(i) + "_0.bin"
+    output = torch.tensor(np.fromfile(
+        result_name, dtype=np.float32).reshape((1, 1000)))
+    _, pred = output.topk(maxk, 1, True, True)
+    res.append(pred[0])
+res = torch.stack(res)
+res = res.numpy()
+np.savetxt('sdk.txt', res, fmt="%d", delimiter=" ")
+print("finished")
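read_bin.py collapses the raw logits into per-sample top-5 class indices in sdk.txt. If only that file and the label dump are at hand, accuracy can be recovered directly from it (a sketch; the `sdk.txt` and `target/` layouts are assumed from read_bin.py and data_to_bin.py above):

```python
import numpy as np

preds = np.loadtxt("sdk.txt", dtype=np.int64)  # (N, 5) top-5 class indices per sample
targets = np.array([np.fromfile(f"target/{i}.bin", dtype=np.int64)[0]
                    for i in range(preds.shape[0])])
top1 = float((preds[:, 0] == targets).mean())
top5 = float((preds == targets[:, None]).any(axis=1).mean())
print(f"top1={top1:.4f} top5={top5:.4f}")
```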
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/acc.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/acc.py
new file mode 100644
index 0000000000..e986decd12
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/acc.py
@@ -0,0 +1,155 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import os
+
+import numpy as np
+import torch
+
+parser = argparse.ArgumentParser(description='SE-ResNet-50 offline accuracy evaluation')
+parser.add_argument('--data', required=False, default="/mass_store/dataset/imagenet/",
+                    type=str, help='Path to the ImageNet dataset root.')
+parser.add_argument('--batch_size', required=False, default=1,
+                    type=int, help='Batch size used when the .bin files were dumped.')
+parser.add_argument('--workers', required=False, default=8,
+                    type=int, help='Number of data loading workers.')
+parser.add_argument('--model_path', required=False, default="checkpoint.pt",
+                    type=str, help='Path to the model checkpoint.')
+args = parser.parse_args()
+
+
+class AverageMeter(object):
+    """Computes and stores the average and current value"""
+
+    def __init__(self, name, fmt=':f', start_count_index=2):
+        self.name = name
+        self.fmt = fmt
+        self.reset()
+        self.start_count_index = start_count_index
+
+    def reset(self):
+        self.val = 0
+        self.avg = 0
+        self.sum = 0
+        self.count = 0
+
+    def update(self, val, n=1):
+        if self.count == 0:
+            self.N = n
+
+        self.val = val
+        self.count += n
+        if self.count > (self.start_count_index * self.N):
+            self.sum += val * n
+            self.avg = self.sum / \
+                (self.count - self.start_count_index * self.N)
+
+    def __str__(self):
+        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
+        return fmtstr.format(**self.__dict__)
+
+
+class ProgressMeter(object):
+
+    def __init__(self, num_batches, meters, prefix=""):
+        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
+        self.meters = meters
+        self.prefix = prefix
+
+    def display(self, batch):
+        entries = [self.prefix + self.batch_fmtstr.format(batch)]
+        entries += [str(meter) for meter in self.meters]
+        print('\t'.join(entries))
+
+    def _get_batch_fmtstr(self, num_batches):
+        num_digits = len(str(num_batches // 1))
+        fmt = '{:' + str(num_digits) + 'd}'
+        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
+
+
+def validate():
+    batch_time = AverageMeter('Time', ':6.3f')
+    losses = AverageMeter('Loss', ':.4e')
+    top1 = AverageMeter('Acc@1', ':6.2f')
+    top5 = AverageMeter('Acc@5', ':6.2f')
+    progress = ProgressMeter(
+        50000,
+        [batch_time, losses, top1, top5],
+        prefix='Test: ')
+    result_path = "../data_and_result/result/data/infer_result/images/images/"
+    label_path = "../data_and_result/val/target/"
+    target_file = os.listdir(label_path)
+    with torch.no_grad():
+        for i in range(len(target_file)):
+            # Pair the i-th logits file with the i-th label file.
+            result_name = result_path + str(i) + "_0.bin"
+            output = torch.tensor(np.fromfile(
+                result_name, dtype=np.float32).reshape((1, 1000)))
+            target_name = label_path + str(i) + ".bin"
+            target = torch.tensor(np.fromfile(target_name, dtype=np.int64))
+            acc1, acc5 = accuracy(output, target, topk=(1, 5))
+            top1.update(acc1[0], 1)
+            top5.update(acc5[0], 1)
+            print('[AVG-ACC] * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
+                  .format(top1=top1, top5=top5))
+    return top1.avg
+
+
+def accuracy(output, target, topk=(1,)):
+    """Computes the accuracy over the k top predictions for the specified values of k"""
+    with torch.no_grad():
+        maxk = max(topk)
+        batch_size = target.size(0)
+        _, pred = output.topk(maxk, 1, True, True)
+        pred = pred.t()
+        correct = pred.eq(target.view(1, -1).expand_as(pred))
+        res = []
+        for k in topk:
+            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
+            res.append(correct_k.mul_(100.0 / batch_size))
+        return res
+
+
+if __name__ == '__main__':
+    validate()
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/api/__init__.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/api/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/api/infer.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/api/infer.py
new file mode 100644
index 0000000000..9b51a6fe7b
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/api/infer.py
@@ -0,0 +1,155 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================ +'''sdk infer''' +import json +import logging + +import MxpiDataType_pb2 as MxpiDataType +from StreamManagerApi import StreamManagerApi, MxDataInput, InProtobufVector, MxProtobufIn, StringVector + +from config import config as cfg + + +class SdkApi: + '''sdk api''' + INFER_TIMEOUT = cfg.INFER_TIMEOUT + STREAM_NAME = cfg.STREAM_NAME + + def __init__(self, pipeline_cfg): + self.pipeline_cfg = pipeline_cfg + self._stream_api = None + self._data_input = None + self._device_id = None + + def init(self): + '''sdk init ''' + with open(self.pipeline_cfg, 'r') as fp: + self._device_id = int( + json.loads(fp.read())[self.STREAM_NAME]["stream_config"] + ["deviceId"]) + print(f"The device id: {self._device_id}.") + + # create api + self._stream_api = StreamManagerApi() + + # init stream mgr + ret = self._stream_api.InitManager() + if ret != 0: + print(f"Failed to init stream manager, ret={ret}.") + return False + + # create streams + with open(self.pipeline_cfg, 'rb') as fp: + pipe_line = fp.read() + + ret = self._stream_api.CreateMultipleStreams(pipe_line) + if ret != 0: + print(f"Failed to create stream, ret={ret}.") + return False + + self._data_input = MxDataInput() + + return True + + def __del__(self): + '''del sdk''' + if not self._stream_api: + return + + self._stream_api.DestroyAllStreams() + + def send_data_input(self, stream_name, plugin_id, input_data): + '''input data use SendData''' + data_input = MxDataInput() + data_input.data = input_data + unique_id = self._stream_api.SendData(stream_name, plugin_id, + data_input) + if unique_id < 0: + logging.error("Fail to send data to stream.") + return False + return True + + def _send_protobuf(self, stream_name, plugin_id, element_name, buf_type, + pkg_list): + '''input data use SendProtobuf''' + protobuf = MxProtobufIn() + protobuf.key = element_name.encode("utf-8") + protobuf.type = buf_type + protobuf.protobuf = pkg_list.SerializeToString() + protobuf_vec = InProtobufVector() + protobuf_vec.push_back(protobuf) + err_code = self._stream_api.SendProtobuf(stream_name, plugin_id, + protobuf_vec) + if err_code != 0: + logging.error( + "Failed to send data to stream, stream_name(%s), plugin_id(%s), element_name(%s), " + "buf_type(%s), err_code(%s).", stream_name, plugin_id, + element_name, buf_type, err_code) + return False + return True + + def send_img_input(self, stream_name, plugin_id, element_name, input_data, + img_size): + '''use cv input to sdk''' + vision_list = MxpiDataType.MxpiVisionList() + vision_vec = vision_list.visionVec.add() + vision_vec.visionInfo.format = 1 + vision_vec.visionInfo.width = img_size[1] + vision_vec.visionInfo.height = img_size[0] + vision_vec.visionInfo.widthAligned = img_size[1] + vision_vec.visionInfo.heightAligned = img_size[0] + vision_vec.visionData.memType = 0 + vision_vec.visionData.dataStr = input_data + vision_vec.visionData.dataSize = len(input_data) + buf_type = b"MxTools.MxpiVisionList" + return self._send_protobuf(stream_name, plugin_id, element_name, + buf_type, vision_list) + + def send_tensor_input(self, stream_name, plugin_id, element_name, + input_data, input_shape, data_type): + '''input data use SendData''' + tensor_list = MxpiDataType.MxpiTensorPackageList() + tensor_pkg = tensor_list.tensorPackageVec.add() + # init tensor vector + tensor_vec = tensor_pkg.tensorVec.add() + tensor_vec.deviceId = self._device_id + tensor_vec.memType = 0 + tensor_vec.tensorShape.extend(input_shape) + tensor_vec.tensorDataType = 
data_type + tensor_vec.dataStr = input_data + tensor_vec.tensorDataSize = len(input_data) + + buf_type = b"MxTools.MxpiTensorPackageList" + return self._send_protobuf(stream_name, plugin_id, element_name, + buf_type, tensor_list) + + def get_result(self, stream_name, out_plugin_id=0): + '''result output''' + key_vec = StringVector() + key_vec.push_back(b'mxpi_tensorinfer0') + infer_result = self._stream_api.GetProtobuf(stream_name, 0, key_vec) + if infer_result.size() == 0: + print("inferResult is null") + return None + if infer_result[0].errorCode != 0: + print("GetProtobuf error. errorCode=%d" % + (infer_result[0].errorCode)) + return None + result = MxpiDataType.MxpiTensorPackageList() + result.ParseFromString(infer_result[0].messageBuf) + + print('datastr', type(result.tensorPackageVec[0].tensorVec[0].dataStr)) + + return result.tensorPackageVec[0].tensorVec[0].dataStr diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/config/Se_resnet50_ms_test.pipeline b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/config/Se_resnet50_ms_test.pipeline new file mode 100644 index 0000000000..97a7786052 --- /dev/null +++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/config/Se_resnet50_ms_test.pipeline @@ -0,0 +1,28 @@ +{ + "im_midas": { + "stream_config": { + "deviceId": "0" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "mxpi_tensorinfer0" + }, + + "mxpi_tensorinfer0": { + "props": { + "dataSource": "appsrc0", + "modelPath": "se_resnet50.om" + + }, + "factory": "mxpi_tensorinfer", + "next": "appsink0" + }, + + "appsink0": { + "factory": "appsink" + } + } +} diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/config/config.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/config/config.py new file mode 100644 index 0000000000..a09fb82164 --- /dev/null +++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/config/config.py @@ -0,0 +1,25 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +'''model config ''' + +STREAM_NAME = "im_midas" +MODEL_WIDTH = 224 +MODEL_HEIGHT = 224 + +INFER_TIMEOUT = 100000 + +TENSOR_DTYPE_FLOAT32 = 0 +TENSOR_DTYPE_FLOAT16 = 1 +TENSOR_DTYPE_INT8 = 2 diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/main.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/main.py new file mode 100644 index 0000000000..83781cd62c --- /dev/null +++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/infer/sdk/main.py @@ -0,0 +1,229 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+'''sdk main'''
+
+import argparse
+import os
+import sys
+import time
+
+import cv2
+
+import numpy as np
+
+from api.infer import SdkApi
+from config import config as cfg
+
+
+def parser_args():
+    '''set parameters'''
+    parser = argparse.ArgumentParser(description="se_resnet50 inference")
+    parser.add_argument("--img_path",
+                        type=str,
+                        required=True,
+                        help="image directory.")
+    parser.add_argument(
+        "--pipeline_path",
+        type=str,
+        required=False,
+        default="config/Se_resnet50_ms_test.pipeline",
+        help="pipeline file path. The default is 'config/Se_resnet50_ms_test.pipeline'. ")
+    parser.add_argument(
+        "--model_type",
+        type=str,
+        required=False,
+        default="dvpp",
+        help="rgb: high-precision, dvpp: high performance. The default is 'dvpp'.")
+    parser.add_argument(
+        "--infer_mode",
+        type=str,
+        required=False,
+        default="infer",
+        help="infer: only infer, eval: accuracy evaluation. The default is 'infer'.")
+    parser.add_argument(
+        "--infer_result_dir",
+        type=str,
+        required=False,
+        default="./data/infer_result",
+        help="cache dir of inference result. The default is './data/infer_result'."
+    )
+    parser.add_argument(
+        "--dataset_name",
+        type=str,
+        required=True,
+        default="TUM",
+        help="dataset name."
+    )
+
+    parser.add_argument("--ann_file",
+                        type=str,
+                        required=False,
+                        help="eval ann_file.")
+
+    return parser.parse_args()
+
+
+def write_depth(path, depth, bits=1):
+    """Write a depth map to .pfm and .png files.
+
+    Args:
+        path (str): file path without extension
+        depth (array): depth map
+        bits (int): bytes per png sample (1 for uint8, 2 for uint16)
+    """
+    write_pfm(path + ".pfm", depth.astype(np.float32))
+
+    depth_min = depth.min()
+    depth_max = depth.max()
+
+    max_val = (2 ** (8 * bits)) - 1
+
+    if depth_max - depth_min > np.finfo("float").eps:
+        out = max_val * (depth - depth_min) / (depth_max - depth_min)
+    else:
+        out = np.zeros(depth.shape, dtype=depth.dtype)
+
+    if bits == 1:
+        cv2.imwrite(path + ".png", out.astype("uint8"))
+    elif bits == 2:
+        cv2.imwrite(path + ".png", out.astype("uint16"))
+
+    return out
+
+
+def write_pfm(path, image, scale=1):
+    """Write a pfm file.
+
+    Args:
+        path (str): file path
+        image (array): data
+        scale (int, optional): Scale. Defaults to 1.
+    """
+
+    with open(path, "wb") as file:
+        color = None
+
+        if image.dtype.name != "float32":
+            raise Exception("Image dtype must be float32.")
+
+        image = np.flipud(image)
+
+        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
+            color = True
+        elif (
+                len(image.shape) == 2 or len(
+                    image.shape) == 3 and image.shape[2] == 1
+        ):  # greyscale
+            color = False
+        else:
+            raise Exception(
+                "Image must have H x W x 3, H x W x 1 or H x W dimensions.")
+
+        file.write(("PF\n" if color else "Pf\n").encode())
+        file.write(("%d %d\n" % (image.shape[1], image.shape[0])).encode())
+
+        endian = image.dtype.byteorder
+
+        if endian == "<" or endian == "=" and sys.byteorder == "little":
+            scale = -scale
+
+        file.write(("%f\n" % scale).encode())
+
+        image.tofile(file)
+
+
+def process_img(img_file):
+    '''bin preprocess'''
+    f = open(img_file, mode='rb')
+    img = np.fromfile(f, dtype=np.float32).reshape((224, 224, 3))
+    return img
+
+
+def image_inference(pipeline_path, stream_name, img_dir, result_dir,
+                    replace_last, dataset_name, model_type):
+    '''sdk process'''
+    sdk_api = SdkApi(pipeline_path)
+    if not sdk_api.init():
+        sys.exit(-1)
+
+    if not os.path.exists(result_dir):
+        os.makedirs(result_dir)
+
+    img_data_plugin_id = 0
+    dirs = []
+    if dataset_name == 'images':
+        data_set = img_dir + "images"
+        dirs.append("images")
+    elif dataset_name == "Kitti":
+        data_set = img_dir + "/Kitti_raw_data"
+        dirs = os.listdir(data_set)
+    elif dataset_name == "Sintel":
+        data_set = img_dir + "/Sintel/final_left"
+        dirs = os.listdir(data_set)
+    else:
+        data_set = img_dir
+        dirs = os.listdir(data_set)
+
+    for d in dirs:
+        if dataset_name == "images":
+            images = os.listdir(os.path.join(data_set))
+        elif dataset_name == "Kitti":
+            images = os.listdir(os.path.join(data_set, d, "images"))
+        elif dataset_name == "Sintel":
+            images = os.listdir(os.path.join(data_set, d))
+        else:
+            images = os.listdir(os.path.join(data_set, d))
+        total_len = len(images)
+        for ind, file_name in enumerate(images):
+            if dataset_name == "images":
+                file_path = os.path.join(data_set, file_name)
+            elif dataset_name == "Kitti":
+                file_path = os.path.join(data_set, d, "images", file_name)
+            elif dataset_name == "Sintel":
+                file_path = os.path.join(data_set, d, file_name)
+            else:
+                file_path = os.path.join(data_set, d, file_name)
+            print(img_dir, " ", d, " ", file_name)
+            print("file_path is ", file_path)
+            img_np = process_img(file_path)
+            img_shape = img_np.shape
+            print("input shape is", img_shape)
+            sdk_api.send_img_input(stream_name,
+                                   img_data_plugin_id, "appsrc0",
+                                   img_np.tobytes(), img_shape)
+            start_time = time.time()
+            result = sdk_api.get_result(stream_name)
+            end_time = time.time() - start_time
+
+            save_path = os.path.join(result_dir, dataset_name, d)
+            if not os.path.exists(save_path):
+                os.makedirs(save_path)
+            save_path = os.path.join(save_path, file_name)
+            save_path = save_path.replace(
+                '.' + save_path.split('.')[-1], '_0.bin')
+            print('save_path is ', save_path)
+            with open(save_path, "wb") as fp:
+                fp.write(result)
+            print(
+                f"End-2end inference, file_name: {file_path}, {ind + 1}/{total_len}, elapsed_time: {end_time}.\n"
+            )
+
+
+if __name__ == "__main__":
+    args = parser_args()
+
+    stream_name1 = cfg.STREAM_NAME.encode("utf-8")
+    image_inference(args.pipeline_path, stream_name1, args.img_path,
+                    args.infer_result_dir, True, args.dataset_name, args.model_type)
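One layout detail worth noting when wiring the two inference paths together: data_to_bin.py writes CHW tensors (1x3x224x224), while process_img() above reshapes each file as HWC (224, 224, 3). If the same dump is reused for the SDK path, a transpose keeps the two pipelines consistent (a sketch; file names are illustrative and the actual preprocessing used should be verified):

```python
import numpy as np

# Transpose a CHW dump (as written by data_to_bin.py) into the HWC layout
# that process_img() expects.
chw = np.fromfile("images/0.bin", dtype=np.float32).reshape(3, 224, 224)
chw.transpose(1, 2, 0).tofile("images_hwc/0.bin")
```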
diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/modelarts/train_start.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/modelarts/train_start.py
new file mode 100644
index 0000000000..82ef5ed66e
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/modelarts/train_start.py
@@ -0,0 +1,670 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the BSD 3-Clause License (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://opensource.org/licenses/BSD-3-Clause
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import os
+import random
+import shutil
+import time
+import warnings
+import moxing as mox
+
+import apex
+import numpy as np
+import torch.npu
+from apex import amp
+
+from collections import OrderedDict
+import torch
+import torch.onnx
+import torch.nn as nn
+import torch.nn.parallel
+import torch.backends.cudnn as cudnn
+import torch.distributed as dist
+import torch.optim
+import torch.multiprocessing as mp
+import torch.utils.data
+import torch.utils.data.distributed
+import torchvision.transforms as transforms
+import torchvision.datasets as datasets
+import senet
+CALCULATE_DEVICE = "npu:0"
+model_names = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
+               'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
+               'wide_resnet50_2', 'wide_resnet101_2']
+parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
+parser.add_argument('--data', default='', type=str,
+                    help='path to dataset')
+parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
+                    choices=model_names,
+                    help='model architecture: ' +
+                    ' | '.join(model_names) +
+                    ' (default: resnet18)')
+parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
+                    help='number of data loading workers (default: 4)')
+parser.add_argument('--epochs', default=90, type=int, metavar='N',
+                    help='number of total epochs to run')
+parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
+                    help='manual epoch number (useful on restarts)')
+parser.add_argument('-b', '--batch-size', default=256, type=int,
+                    metavar='N',
+                    help='mini-batch size (default: 256), this is the total '
+                         'batch size of all GPUs on the current node when '
+                         'using Data Parallel or Distributed Data Parallel')
+parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
+                    metavar='LR', help='initial learning rate', dest='lr')
+parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
+                    help='momentum')
+parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
+                    metavar='W', help='weight decay (default: 1e-4)',
+                    dest='weight_decay')
+parser.add_argument('-p', '--print-freq', default=10, type=int,
+                    metavar='N', help='print frequency (default: 10)')
+parser.add_argument('--resume', default='', type=str, metavar='PATH',
+                    help='path to latest checkpoint (default: none)')
+parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
+                    help='evaluate model on validation set')
+parser.add_argument('--pretrained', default=False, dest='pretrained', action='store_true',
+                    help='use pre-trained model')
+parser.add_argument('--world-size', default=-1, type=int,
+                    help='number of nodes for distributed training')
+parser.add_argument('--rank', default=-1, type=int,
+                    help='node rank for distributed training')
+parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
+                    help='url used to set up distributed training')
+parser.add_argument('--dist-backend', default='nccl', type=str,
+                    help='distributed backend')
+parser.add_argument('--seed', default=None, type=int,
+                    help='seed for initializing training. ')
+parser.add_argument('--gpu', default=None, type=int,
+                    help='GPU id to use.')
+parser.add_argument('--npu', default=None, type=int,
+                    help='NPU id to use.')
+parser.add_argument('--multiprocessing-distributed', action='store_true',
+                    help='Use multi-processing distributed training to launch '
+                         'N processes per node, which has N GPUs. This is the '
+                         'fastest way to use PyTorch for either single node or '
+                         'multi node data parallel training')
+
+parser.add_argument('--device', default='npu', type=str, help='npu or gpu')
+parser.add_argument('--addr', default='10.136.181.115',
+                    type=str, help='master addr')
+parser.add_argument('--amp', default=False, action='store_true',
+                    help='use amp to train the model')
+parser.add_argument('--warm_up_epochs', default=0, type=int,
+                    help='warm up')
+parser.add_argument('--loss-scale', default=1024., type=float,
+                    help='loss scale used in amp; -1 means dynamic')
+parser.add_argument('--opt-level', default='O2', type=str,
+                    help='opt level used in amp (default: O2)')
+parser.add_argument('--prof', default=False, action='store_true',
+                    help='use profiling to evaluate the performance of model')
+parser.add_argument('--save_path', default='', type=str,
+                    help='path to save models')
+parser.add_argument('--num_classes', default=1000, type=int,
+                    help='number of classes')
+
+# modelarts modification
+parser.add_argument('--train_url',
+                    default='obs://mindx-user/csl-shi/wideresnet101/result/',
+                    type=str,
+                    help="setting dir of training output")
+parser.add_argument('--data_url',
+                    default='obs://mindx-user/csl-shi/wideresnet101/data/',
+                    type=str,
+                    help='path to dataset')
+
+parser.add_argument('--model_url',
+                    metavar='DIR',
+                    default='',
+                    help='path to pretrained model')
+parser.add_argument('--onnx', default=True, action='store_true',
+                    help="convert pth model to onnx")
+
+cur_step = 0
+CACHE_TRAINING_URL = "/cache/training/"
+CACHE_DATA_URL = "/cache/data_url"
+CACHE_MODEL_URL = "/cache/model"
+
+best_acc1 = 0
+
+
+def main():
+    args = parser.parse_args()
+    global CALCULATE_DEVICE
+    CALCULATE_DEVICE = "npu:{}".format(args.npu)
+    if 'npu' in CALCULATE_DEVICE:
+        torch.npu.set_device(CALCULATE_DEVICE)
+    if args.data_url:
+        import moxing as mox
+    if args.seed is not None:
+        random.seed(args.seed)
+        torch.manual_seed(args.seed)
+        cudnn.deterministic = True
+        warnings.warn('You have chosen to seed training. '
+                      'This will turn on the CUDNN deterministic setting, '
+                      'which can slow down your training considerably! '
+                      'You may see unexpected behavior when restarting '
+                      'from checkpoints.')
+
+    if args.gpu is not None:
+        warnings.warn('You have chosen a specific GPU. This will completely '
+                      'disable data parallelism.')
+
+    if args.dist_url == "env://" and args.world_size == -1:
+        args.world_size = int(os.environ["WORLD_SIZE"])
+
+    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
+
+    ngpus_per_node = torch.cuda.device_count()
+    if args.multiprocessing_distributed:
+        # Since we have ngpus_per_node processes per node, the total world_size
+        # needs to be adjusted accordingly
+        args.world_size = ngpus_per_node * args.world_size
+        # Use torch.multiprocessing.spawn to launch distributed processes: the
+        # main_worker process function
+        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
+    else:
+        # Simply call main_worker function
+        main_worker(args.gpu, ngpus_per_node, args)
+
+
+def main_worker(gpu, ngpus_per_node, args):
+    global best_acc1
+    ###### modify npu_p1 1######
+    args.gpu = None
+    ###### modify npu_p1 1 end ######
+    if args.gpu is not None:
+        print("Use GPU: {} for training".format(args.gpu))
+
+    if args.distributed:
+        if args.dist_url == "env://" and args.rank == -1:
+            args.rank = int(os.environ["RANK"])
+        if args.multiprocessing_distributed:
+            # For multiprocessing distributed training, rank needs to be the
+            # global rank among all the processes
+            args.rank = args.rank * ngpus_per_node + gpu
+        ###### modify 8 ######
+        if args.device == 'npu':
+            dist.init_process_group(backend=args.dist_backend,  # init_method=args.dist_url,
+                                    world_size=args.world_size, rank=args.rank)
+        else:
+            dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
+                                    world_size=args.world_size, rank=args.rank)
+        ###### modify 8 end ######
+    # create model
+    if args.pretrained:
+        print("=> using pre-trained model se_resnet50")
+        model = senet.se_resnet50()
+        print("loading model of yours...")
+        model_path = "./checkpoint.pth.tar"
+        if args.model_url:
+            real_path = CACHE_MODEL_URL
+            if not os.path.exists(real_path):
+                os.makedirs(real_path)
+            mox.file.copy_parallel(args.model_url, real_path)
+            print("pretrained model finished copying to %s." % real_path)
+            model_path = os.path.join(CACHE_MODEL_URL, 'checkpoint.pth.tar')
+        pretrained_dict = torch.load(model_path, map_location="cpu")["state_dict"]
+        model.load_state_dict({k.replace('module.', ''): v for k, v in pretrained_dict.items()})
+        if "fc.weight" in pretrained_dict:
+            pretrained_dict.pop('fc.weight')
+            pretrained_dict.pop('fc.bias')
+        for param in model.parameters():
+            param.requires_grad = False
+        model.fc = nn.Linear(2048, args.num_classes)
+        # model.load_state_dict(pretrained_dict, strict=False)
+    else:
+        print("=> creating model se_resnet50")
+        model = senet.se_resnet50()
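+    # Note on the pretrained branch above: the checkpoint is loaded with the
+    # 'module.' prefix stripped (DistributedDataParallel saves parameters under
+    # that prefix), every backbone parameter is then frozen, and a fresh
+    # nn.Linear(2048, num_classes) head replaces model.fc, so only the new
+    # classifier receives gradients during fine-tuning.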
+ if args.gpu is not None: + torch.cuda.set_device(args.gpu) + model.cuda(args.gpu) + # When using a single GPU per process and per + # DistributedDataParallel, we need to divide the batch size + # ourselves based on the total number of GPUs we have + args.batch_size = int(args.batch_size / ngpus_per_node) + args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + else: + model.cuda() + # DistributedDataParallel will divide and allocate batch_size to all + # available GPUs if device_ids are not set + model = torch.nn.parallel.DistributedDataParallel(model) + elif args.gpu is not None: + torch.cuda.set_device(args.gpu) + model = model.cuda(args.gpu) + else: + # DataParallel will divide and allocate batch_size to all available GPUs + if args.arch.startswith('alexnet') or args.arch.startswith('vgg'): + model.features = torch.nn.DataParallel(model.features) + model.cuda() + else: + # model = torch.nn.DataParallel(model).cuda() + ###### modify npu_p1 3###### + model = model.to(CALCULATE_DEVICE) + ###### modify npu_p1 3 end ###### + + # define loss function (criterion) and optimizer + # criterion = nn.CrossEntropyLoss().cuda(args.gpu) + ############## npu modify 4 begin ############# + # 将损失函数迁移到NPU上进行计算。 + criterion = nn.CrossEntropyLoss().to(CALCULATE_DEVICE) + ############## npu modify 4 end ############# + optimizer = apex.optimizers.NpuFusedSGD(model.parameters(), args.lr, + momentum=args.momentum, + nesterov=True, + weight_decay=args.weight_decay) + ###### modify 1 ###### + if args.amp: + model, optimizer = amp.initialize( + model, optimizer, opt_level=args.opt_level, loss_scale=args.loss_scale) + ###### modify 1 end ###### + # optionally resume from a checkpoint + if args.resume: + if os.path.isfile(args.resume): + print("=> loading checkpoint '{}'".format(args.resume)) + if args.gpu is None: + checkpoint = torch.load(args.resume) + else: + # Map model to be loaded to specified single gpu. + loc = 'cuda:{}'.format(args.gpu) + checkpoint = torch.load(args.resume, map_location=loc) + args.start_epoch = checkpoint['epoch'] + best_acc1 = checkpoint['best_acc1'] + if args.gpu is not None: + # best_acc1 may be from a checkpoint from a different GPU + best_acc1 = best_acc1.to(args.gpu) + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + print("=> loaded checkpoint '{}' (epoch {})" + .format(args.resume, checkpoint['epoch'])) + else: + print("=> no checkpoint found at '{}'".format(args.resume)) + + cudnn.benchmark = True + if args.data_url: + real_path = CACHE_DATA_URL + if not os.path.exists(real_path): + os.makedirs(real_path) + mox.file.copy_parallel(args.data_url, real_path) + print("training data finish copy to %s." 
% real_path) + args.data = real_path + + # Data loading code + traindir = os.path.join(args.data, 'train') + valdir = os.path.join(args.data, 'val') + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + + train_dataset = datasets.ImageFolder( + traindir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ])) + + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) + else: + train_sampler = None + ###### modify 7 ###### + train_loader = torch.utils.data.DataLoader( + train_dataset, batch_size=args.batch_size, shuffle=( + train_sampler is None), + num_workers=args.workers, pin_memory=False, sampler=train_sampler, drop_last=True) + ###### modify 7 end ####### + val_loader = torch.utils.data.DataLoader( + datasets.ImageFolder(valdir, transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])), + batch_size=args.batch_size, shuffle=False, + num_workers=args.workers, pin_memory=True) + + if args.evaluate: + validate(val_loader, model, criterion, args) + return + ###### modify 3 ###### + if args.prof: + profiling(train_loader, model, criterion, optimizer, args) + return + ###### modify 3 end ###### + + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + train_sampler.set_epoch(epoch) + adjust_learning_rate(optimizer, epoch, args) + + # train for one epoch + train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node) + + # evaluate on validation set + acc1 = validate(val_loader, model, criterion, args) + + # remember best acc@1 and save checkpoint + is_best = acc1 > best_acc1 + best_acc1 = max(acc1, best_acc1) + + if not args.multiprocessing_distributed or (args.multiprocessing_distributed + and args.rank % ngpus_per_node == 0): + save_checkpoint({ + 'epoch': epoch + 1, + 'arch': args.arch, + 'state_dict': model.state_dict(), + 'best_acc1': best_acc1, + 'optimizer': optimizer.state_dict(), + }, is_best) + if args.train_url: + mox.file.copy_parallel(CACHE_TRAINING_URL, args.train_url) + + +def proc_node_module(checkpoint, AttrName): + new_state_dict = OrderedDict() + for k, v in checkpoint[AttrName].items(): + if(k[0:7] == "module."): + name = k[7:] + else: + name = k[0:] + new_state_dict[name] = v + return new_state_dict + +def convert(model_path, onnx_save, num_class): + checkpoint = torch.load(model_path, map_location='cpu') + checkpoint['state_dict'] = proc_node_module(checkpoint, 'state_dict') + #model = resnet_0_6_0.wide_resnet101_2(num_classes=num_class) + model = senet.se_resnet50() + model.load_state_dict(checkpoint['state_dict']) + model.eval() + input_names = ["actual_input_1"] + output_names = ["output1"] + dummy_input = torch.randn(1, 3, 224, 224) + if len(onnx_save) > 0: + save_path = os.path.join(onnx_save, "se_resnet50_2_npu_16.onnx") + else: + save_path = "se_resnet50_2_npu_16.onnx" + print(save_path) + torch.onnx.export(model, dummy_input, save_path + , input_names=input_names, output_names=output_names + , opset_version=11) + + +def profiling(data_loader, model, criterion, optimizer, args): + # switch to train mode + model.train() + + def update(model, images, target, optimizer): + output = model(images) + loss = criterion(output, target) + if args.amp: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + optimizer.zero_grad() + optimizer.step() + + for 
step, (images, target) in enumerate(data_loader): + if args.device == 'npu': + loc = CALCULATE_DEVICE + images = images.to(loc, non_blocking=True).to(torch.float) + target = target.to(torch.int32).to(loc, non_blocking=True) + else: + images = images.cuda(args.gpu, non_blocking=True) + target = target.cuda(args.gpu, non_blocking=True) + + if step < 5: + update(model, images, target, optimizer) + else: + if args.device == 'npu': + with torch.autograd.profiler.profile(use_npu=True) as prof: + update(model, images, target, optimizer) + else: + with torch.autograd.profiler.profile(use_cuda=True) as prof: + update(model, images, target, optimizer) + break + + prof.export_chrome_trace("output.prof") + +def train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node): + batch_time = AverageMeter('Time', ':6.3f') + data_time = AverageMeter('Data', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter( + len(train_loader), + [batch_time, data_time, losses, top1, top5], + prefix="Epoch: [{}]".format(epoch)) + + # switch to train mode + model.train() + end = time.time() + for i, (images, target) in enumerate(train_loader): + # measure data loading time + data_time.update(time.time() - end) + + if args.gpu is not None: + images = images.cuda(args.gpu, non_blocking=True) + # if torch.cuda.is_available(): + # target = target.cuda(args.gpu, non_blocking=True) + ############## npu modify 5 begin ############# + # 将数据集迁移到NPU上进行计算并修改target数据类型 + if 'npu' in CALCULATE_DEVICE: + target = target.to(torch.int32) + images, target = images.to(CALCULATE_DEVICE, non_blocking=True), target.to(CALCULATE_DEVICE, non_blocking=True) + ############## npu modify 5 end ############# + + # compute output + output = model(images) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), images.size(0)) + top1.update(acc1[0], images.size(0)) + top5.update(acc5[0], images.size(0)) + + # compute gradient and do SGD step + optimizer.zero_grad() + ###### modify 2 ###### + if args.amp: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + + ###### modify 2 end ###### + optimizer.step() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + ###### modify 4 ###### + if i % args.print_freq == 0: + if not args.multiprocessing_distributed or (args.multiprocessing_distributed + and args.rank % ngpus_per_node == 0): + progress.display(i) + + if not args.multiprocessing_distributed or (args.multiprocessing_distributed + and args.rank % ngpus_per_node == 0): + if batch_time.avg: + print("[npu id:", CALCULATE_DEVICE, "]", "batch_size:", args.world_size * args.batch_size, + 'Time: {:.3f}'.format(batch_time.avg), '* FPS@all {:.3f}'.format( + args.batch_size * args.world_size / batch_time.avg)) + ###### modify 4 end ###### + + +def validate(val_loader, model, criterion, args): + ###### modify 5 ###### + batch_time = AverageMeter('Time', ':6.3f', start_count_index= 5) + ###### modify 5 end ###### + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter( + len(val_loader), + [batch_time, losses, top1, top5], + prefix='Test: ') + + # switch to evaluate mode + model.eval() + + with torch.no_grad(): + end = time.time() + for i, (images, target) in 
enumerate(val_loader): + if args.device == 'npu': + loc = CALCULATE_DEVICE + images = images.to(loc).to(torch.float) + if args.device == 'npu': + loc = CALCULATE_DEVICE + target = target.to(torch.int32).to(loc, non_blocking=True) + # compute output + + output = model(images) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), images.size(0)) + top1.update(acc1[0], images.size(0)) + top5.update(acc5[0], images.size(0)) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + progress.display(i) + + # TODO: this should also be done with the ProgressMeter + print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}' + .format(top1=top1, top5=top5)) + + return top1.avg + + +def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): + args = parser.parse_args() + if args.train_url: + os.makedirs(CACHE_TRAINING_URL, 0o755, exist_ok=True) + filename = os.path.join(CACHE_TRAINING_URL, filename) + torch.save(state, filename) + convert(filename, CACHE_TRAINING_URL, args.num_classes) + path_best = os.path.join(CACHE_TRAINING_URL, 'model_best.pth.tar') + if is_best: + shutil.copyfile(filename, path_best) + else: + filename = os.path.join(args.save_path, filename) + torch.save(state, filename) + path_best = os.path.join(args.save_path, 'model_best.pth.tar') + if is_best: + shutil.copyfile(filename, path_best) + + +class AverageMeter(object): + """Computes and stores the average and current value""" + + def __init__(self, name, fmt=':f', start_count_index=2): + self.name = name + self.fmt = fmt + self.reset() + self.start_count_index = start_count_index + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + if self.count == 0: + self.N = n + + self.val = val + self.count += n + if self.count > (self.start_count_index * self.N): + self.sum += val * n + self.avg = self.sum / (self.count - self.start_count_index * self.N) + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + +class ProgressMeter(object): + def __init__(self, num_batches, meters, prefix=""): + self.batch_fmtstr = self._get_batch_fmtstr(num_batches) + self.meters = meters + self.prefix = prefix + + def display(self, batch): + entries = [self.prefix + self.batch_fmtstr.format(batch)] + entries += [str(meter) for meter in self.meters] + print('\t'.join(entries)) + + def _get_batch_fmtstr(self, num_batches): + num_digits = len(str(num_batches // 1)) + fmt = '{:' + str(num_digits) + 'd}' + return '[' + fmt + '/' + fmt.format(num_batches) + ']' + + +def adjust_learning_rate(optimizer, epoch, args): + """Sets the learning rate to the initial LR decayed by cosine method""" + + if args.warm_up_epochs > 0 and epoch < args.warm_up_epochs: + lr = args.lr * ((epoch + 1) / (args.warm_up_epochs + 1)) + else: + alpha = 0 + cosine_decay = 0.5 * ( + 1 + np.cos(np.pi * (epoch - args.warm_up_epochs) / (args.epochs - args.warm_up_epochs))) + decayed = (1 - alpha) * cosine_decay + alpha + lr = args.lr * decayed + + print("=> Epoch[%d] Setting lr: %.4f" % (epoch, lr)) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = 
output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +if __name__ == '__main__': + ############## npu modify 6 begin ############# + ############## npu modify 6 begin ############# + main() diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/se_module.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/se_module.py index 2fb6084d61..e03db22c47 100644 --- a/PyTorch/contrib/cv/classification/SE-ResNet-50/se_module.py +++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/se_module.py @@ -1,34 +1,34 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -from torch import nn - - -class SELayer(nn.Module): - def __init__(self, channel, reduction=16): - super(SELayer, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.fc = nn.Sequential( - nn.Linear(channel, channel // reduction, bias=False), - nn.ReLU(inplace=True), - nn.Linear(channel // reduction, channel, bias=False), - nn.Sigmoid() - ) - - def forward(self, x): - b, c, h, w = x.size() - y = self.avg_pool(x).view(b, c) - y = self.fc(y).view(b, c, 1, 1) +# -*- coding: utf-8 -*- +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +from torch import nn + + +class SELayer(nn.Module): + def __init__(self, channel, reduction=16): + super(SELayer, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(channel, channel // reduction, bias=False), + nn.ReLU(inplace=True), + nn.Linear(channel // reduction, channel, bias=False), + nn.Sigmoid() + ) + + def forward(self, x): + b, c, h, w = x.size() + y = self.avg_pool(x).view(b, c) + y = self.fc(y).view(b, c, 1, 1) return x * y.repeat(1,1,h,w) \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/senet.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/senet.py index 01f3c76f3a..6dc5047cfc 100644 --- a/PyTorch/contrib/cv/classification/SE-ResNet-50/senet.py +++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/senet.py @@ -1,300 +1,300 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -import torch.nn as nn -from torch.hub import load_state_dict_from_url -from torchvision.models import ResNet -from se_module import SELayer - - -def conv3x3(in_planes, out_planes, stride=1): - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) - - -class SEBasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None, - *, reduction=16): - super(SEBasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes, 1) - self.bn2 = nn.BatchNorm2d(planes) - self.se = SELayer(planes, reduction) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.se(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class SEBottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None, - *, reduction=16): - super(SEBottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, - padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se = SELayer(planes * 4, reduction) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = 
self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - out = self.se(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -def se_resnet18(num_classes=1_000): - """Constructs a ResNet-18 model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes) - model.avgpool = nn.AdaptiveAvgPool2d(1) - return model - - -def se_resnet34(num_classes=1_000): - """Constructs a ResNet-34 model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes) - model.avgpool = nn.AdaptiveAvgPool2d(1) - return model - - -def se_resnet50(num_classes=1_000, pretrained=False): - """Constructs a ResNet-50 model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes) - model.avgpool = nn.AdaptiveAvgPool2d(1) - if pretrained: - model.load_state_dict(load_state_dict_from_url( - "https://github.com/moskomule/senet.pytorch/releases/download/archive/seresnet50-60a8950a85b2b.pkl")) - return model - - -def se_resnet101(num_classes=1_000): - """Constructs a ResNet-101 model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes) - model.avgpool = nn.AdaptiveAvgPool2d(1) - return model - - -def se_resnet152(num_classes=1_000): - """Constructs a ResNet-152 model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(SEBottleneck, [3, 8, 36, 3], num_classes=num_classes) - model.avgpool = nn.AdaptiveAvgPool2d(1) - return model - - -class CifarSEBasicBlock(nn.Module): - def __init__(self, inplanes, planes, stride=1, reduction=16): - super(CifarSEBasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = nn.BatchNorm2d(planes) - self.se = SELayer(planes, reduction) - if inplanes != planes: - self.downsample = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(planes)) - else: - self.downsample = lambda x: x - self.stride = stride - - def forward(self, x): - residual = self.downsample(x) - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.se(out) - - out += residual - out = self.relu(out) - - return out - - -class CifarSEResNet(nn.Module): - def __init__(self, block, n_size, num_classes=10, reduction=16): - super(CifarSEResNet, self).__init__() - self.inplane = 16 - self.conv1 = nn.Conv2d( - 3, self.inplane, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(self.inplane) - self.relu = nn.ReLU(inplace=True) - self.layer1 = self._make_layer( - block, 16, blocks=n_size, stride=1, reduction=reduction) - self.layer2 = self._make_layer( - block, 32, blocks=n_size, stride=2, reduction=reduction) - self.layer3 = self._make_layer( - block, 64, blocks=n_size, stride=2, reduction=reduction) - self.avgpool = nn.AdaptiveAvgPool2d(1) - self.fc = nn.Linear(64, num_classes) - self.initialize() - - def initialize(self): - for m in self.modules(): - if isinstance(m, 
nn.Conv2d): - nn.init.kaiming_normal_(m.weight) - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - def _make_layer(self, block, planes, blocks, stride, reduction): - strides = [stride] + [1] * (blocks - 1) - layers = [] - for stride in strides: - layers.append(block(self.inplane, planes, stride, reduction)) - self.inplane = planes - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - return x - - -class CifarSEPreActResNet(CifarSEResNet): - def __init__(self, block, n_size, num_classes=10, reduction=16): - super(CifarSEPreActResNet, self).__init__( - block, n_size, num_classes, reduction) - self.bn1 = nn.BatchNorm2d(self.inplane) - self.initialize() - - def forward(self, x): - x = self.conv1(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - - x = self.bn1(x) - x = self.relu(x) - - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - -def se_resnet20(**kwargs): - """Constructs a ResNet-18 model. - """ - model = CifarSEResNet(CifarSEBasicBlock, 3, **kwargs) - return model - - -def se_resnet32(**kwargs): - """Constructs a ResNet-34 model. - """ - model = CifarSEResNet(CifarSEBasicBlock, 5, **kwargs) - return model - - -def se_resnet56(**kwargs): - """Constructs a ResNet-34 model. - """ - model = CifarSEResNet(CifarSEBasicBlock, 9, **kwargs) - return model - - -def se_preactresnet20(**kwargs): - """Constructs a ResNet-18 model. - """ - model = CifarSEPreActResNet(CifarSEBasicBlock, 3, **kwargs) - return model - - -def se_preactresnet32(**kwargs): - """Constructs a ResNet-34 model. - """ - model = CifarSEPreActResNet(CifarSEBasicBlock, 5, **kwargs) - return model - - -def se_preactresnet56(**kwargs): - """Constructs a ResNet-34 model. - """ - model = CifarSEPreActResNet(CifarSEBasicBlock, 9, **kwargs) +# -*- coding: utf-8 -*- +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +import torch.nn as nn +from torch.hub import load_state_dict_from_url +from torchvision.models import ResNet +from se_module import SELayer + + +def conv3x3(in_planes, out_planes, stride=1): + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) + + +class SEBasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, + *, reduction=16): + super(SEBasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes, 1) + self.bn2 = nn.BatchNorm2d(planes) + self.se = SELayer(planes, reduction) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.se(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class SEBottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, + *, reduction=16): + super(SEBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se = SELayer(planes * 4, reduction) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + out = self.se(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +def se_resnet18(num_classes=1_000): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes) + model.avgpool = nn.AdaptiveAvgPool2d(1) + return model + + +def se_resnet34(num_classes=1_000): + """Constructs a ResNet-34 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes) + model.avgpool = nn.AdaptiveAvgPool2d(1) + return model + + +def se_resnet50(num_classes=1_000, pretrained=False): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes) + model.avgpool = nn.AdaptiveAvgPool2d(1) + if pretrained: + model.load_state_dict(load_state_dict_from_url( + "https://github.com/moskomule/senet.pytorch/releases/download/archive/seresnet50-60a8950a85b2b.pkl")) + return model + + +def se_resnet101(num_classes=1_000): + """Constructs a ResNet-101 model. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes) + model.avgpool = nn.AdaptiveAvgPool2d(1) + return model + + +def se_resnet152(num_classes=1_000): + """Constructs a ResNet-152 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(SEBottleneck, [3, 8, 36, 3], num_classes=num_classes) + model.avgpool = nn.AdaptiveAvgPool2d(1) + return model + + +class CifarSEBasicBlock(nn.Module): + def __init__(self, inplanes, planes, stride=1, reduction=16): + super(CifarSEBasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.se = SELayer(planes, reduction) + if inplanes != planes: + self.downsample = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes)) + else: + self.downsample = lambda x: x + self.stride = stride + + def forward(self, x): + residual = self.downsample(x) + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.se(out) + + out += residual + out = self.relu(out) + + return out + + +class CifarSEResNet(nn.Module): + def __init__(self, block, n_size, num_classes=10, reduction=16): + super(CifarSEResNet, self).__init__() + self.inplane = 16 + self.conv1 = nn.Conv2d( + 3, self.inplane, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(self.inplane) + self.relu = nn.ReLU(inplace=True) + self.layer1 = self._make_layer( + block, 16, blocks=n_size, stride=1, reduction=reduction) + self.layer2 = self._make_layer( + block, 32, blocks=n_size, stride=2, reduction=reduction) + self.layer3 = self._make_layer( + block, 64, blocks=n_size, stride=2, reduction=reduction) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(64, num_classes) + self.initialize() + + def initialize(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, blocks, stride, reduction): + strides = [stride] + [1] * (blocks - 1) + layers = [] + for stride in strides: + layers.append(block(self.inplane, planes, stride, reduction)) + self.inplane = planes + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +class CifarSEPreActResNet(CifarSEResNet): + def __init__(self, block, n_size, num_classes=10, reduction=16): + super(CifarSEPreActResNet, self).__init__( + block, n_size, num_classes, reduction) + self.bn1 = nn.BatchNorm2d(self.inplane) + self.initialize() + + def forward(self, x): + x = self.conv1(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + + x = self.bn1(x) + x = self.relu(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + +def se_resnet20(**kwargs): + """Constructs a ResNet-18 model. + """ + model = CifarSEResNet(CifarSEBasicBlock, 3, **kwargs) + return model + + +def se_resnet32(**kwargs): + """Constructs a ResNet-34 model. 
+ """ + model = CifarSEResNet(CifarSEBasicBlock, 5, **kwargs) + return model + + +def se_resnet56(**kwargs): + """Constructs a ResNet-34 model. + """ + model = CifarSEResNet(CifarSEBasicBlock, 9, **kwargs) + return model + + +def se_preactresnet20(**kwargs): + """Constructs a ResNet-18 model. + """ + model = CifarSEPreActResNet(CifarSEBasicBlock, 3, **kwargs) + return model + + +def se_preactresnet32(**kwargs): + """Constructs a ResNet-34 model. + """ + model = CifarSEPreActResNet(CifarSEBasicBlock, 5, **kwargs) + return model + + +def se_preactresnet56(**kwargs): + """Constructs a ResNet-34 model. + """ + model = CifarSEPreActResNet(CifarSEBasicBlock, 9, **kwargs) return model \ No newline at end of file -- Gitee From 0ab770aacf67e6b0499c143edee6bf7a57a785b7 Mon Sep 17 00:00:00 2001 From: chaishangling Date: Fri, 14 Jun 2024 14:33:13 +0800 Subject: [PATCH 2/2] changshi --- PyTorch/contrib/cv/classification/SE-ResNet-50/demo.py | 3 ++- PyTorch/contrib/cv/classification/SE-ResNet-50/main.py | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/demo.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/demo.py index 55fd3f86a3..ea85e5e8c0 100644 --- a/PyTorch/contrib/cv/classification/SE-ResNet-50/demo.py +++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/demo.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ +print('changshi') import argparse import torch import torchvision @@ -71,4 +72,4 @@ def test(): print(result) if __name__ == "__main__": - test() \ No newline at end of file + test() diff --git a/PyTorch/contrib/cv/classification/SE-ResNet-50/main.py b/PyTorch/contrib/cv/classification/SE-ResNet-50/main.py index 5f8443cc30..c0ff8b2ec3 100644 --- a/PyTorch/contrib/cv/classification/SE-ResNet-50/main.py +++ b/PyTorch/contrib/cv/classification/SE-ResNet-50/main.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# -*- coding: utf-8 -*- # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ +print('hh') +print('xiugai') import warnings warnings.filterwarnings('ignore') -- Gitee