diff --git a/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/infer/mxbase/CMakeLists.txt b/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/infer/mxbase/CMakeLists.txt
index 3f9bf5ac634aca2c47a441e0769923ab55d7dec2..699ee76f6be31a5ab681d4b651847f2f0fd75f2f 100644
--- a/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/infer/mxbase/CMakeLists.txt
+++ b/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/infer/mxbase/CMakeLists.txt
@@ -1,7 +1,10 @@
 cmake_minimum_required(VERSION 3.14.0)
-project(resnet)
-set(TARGET resnet)
+project(arcface)
+set(TARGET arcface)
+
 add_definitions(-DENABLE_DVPP_INTERFACE)
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
 add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
 add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -pie)
 
@@ -18,28 +21,37 @@ endif()
 set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include)
 set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64)
-set(MXBASE_ROOT_DIR ${PROJECT_SOURCE_DIR}/../../)
-set(MXBASE_INC ${MXBASE_ROOT_DIR}/src/include)
-set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/dist/lib)
-set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/dist/lib/modelpostprocessors)
-set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/postprocess/include)
+set(MXBASE_INC $ENV{MX_SDK_HOME}/include)
+set(MXBASE_LIB_DIR $ENV{MX_SDK_HOME}/lib)
+set(MXBASE_POST_LIB_DIR $ENV{MX_SDK_HOME}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR $ENV{MX_SDK_HOME}/include/MxBase/postprocess/include/)
+
 if(DEFINED ENV{MXSDK_OPENSOURCE_DIR})
     set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
 else()
-    set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource/dist)
+    set(OPENSOURCE_DIR $ENV{MX_SDK_HOME}/opensource)
 endif()
 
 include_directories(${ACL_INC_DIR})
-include_directories(${OPENSOURCE_DIR}/include)
-include_directories(${OPENSOURCE_DIR}/include/opencv4)
-
 include_directories(${MXBASE_INC})
 include_directories(${MXBASE_POST_PROCESS_DIR})
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+message(WARNING "${OPENSOURCE_DIR}/include/opencv4/opencv2")
+include_directories(/usr/local/Ascend/ascend-toolkit/5.0.4/x86_64-linux/runtime/include/)
 
 link_directories(${ACL_LIB_DIR})
-link_directories(${OPENSOURCE_DIR}/lib)
 link_directories(${MXBASE_LIB_DIR})
 link_directories(${MXBASE_POST_LIB_DIR})
+link_directories(${OPENSOURCE_DIR}/lib)
+
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/nnrt/latest/acllib)
+message(WARNING "ACL_LIB_PATH:${ACL_LIB_PATH}/lib64/.")
+#include_directories(${CMAKE_CURRENT_BINARY_DIR})
+include_directories(${ACL_LIB_PATH}/include)
+link_directories(${ACL_LIB_PATH}/lib64/)
+
+
 add_executable(${TARGET} main_opencv.cpp Resnet50ClassifyOpencv.cpp)
diff --git a/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/infer/sdk/pipeline/mobilenet.pipeline b/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/infer/sdk/pipeline/mobilenet.pipeline
index 89d6d1971fed0c6233ee5b80612db2415c24ab1e..fdf0f59b6f4891a9f9fdde1c40c1eb09acca5887 100644
--- a/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/infer/sdk/pipeline/mobilenet.pipeline
+++ b/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/infer/sdk/pipeline/mobilenet.pipeline
@@ -1,34 +1,43 @@
 {
-  "im_resnet50": {
+    "im_resnet50": {
         "stream_config": {
             "deviceId": "0"
         },
-  "mxpi_imagedecoder0": {
+        "mxpi_imagedecoder0": {
            "props": {
                "handleMethod": "opencv"
            },
-      "factory": "mxpi_imagedecoder",
-      "next": "mxpi_imageresize0"
-  },
-  "mxpi_imageresize0": {
-      "props": {
-          "handleMethod": "opencv",
+           "factory": "mxpi_imagedecoder",
+           "next": "mxpi_imageresize0"
+        },
+        "mxpi_imageresize0": {
+           "props": {
+               "handleMethod": "opencv",
                "resizeType": "Resizer_Stretch",
-          "resizeHeight": "336",
-          "resizeWidth": "336"
-      },
-      "factory": "mxpi_imageresize",
-      "next": "mxpi_tensorinfer0"
-  },
-  "mxpi_tensorinfer0": {
-      "props": {
-          "dataSource": "mxpi_imageresize0",
-          "modelPath": "../../convert/mobilenetv2.om",
-          "waitingTime": "2000",
-          "outputDeviceId": "-1"
-      },
-      "factory": "mxpi_tensorinfer",
-      "next": "mxpi_classpostprocessor0"
+               "resizeHeight": "304",
+               "resizeWidth": "304"
+           },
+           "factory": "mxpi_imageresize",
+           "next": "mxpi_opencvcentercrop0"
+        },
+        "mxpi_opencvcentercrop0": {
+           "props": {
+               "cropHeight": "304",
+               "cropWidth": "304"
+           },
+           "factory": "mxpi_opencvcentercrop",
+           "next": "mxpi_tensorinfer0"
+        },
+
+        "mxpi_tensorinfer0": {
+           "props": {
+               "dataSource": "mxpi_opencvcentercrop0",
+               "modelPath": "./MobilenetV2_for_PyTorch_1.2.om",
+               "waitingTime": "2000",
+               "outputDeviceId": "-1"
+           },
+           "factory": "mxpi_tensorinfer",
+           "next": "mxpi_classpostprocessor0"
        },
        "mxpi_classpostprocessor0": {
            "props": {
@@ -39,7 +48,7 @@
            },
            "factory": "mxpi_classpostprocessor",
            "next": "mxpi_dataserialize0"
-  },
+        },
        "mxpi_dataserialize0": {
            "props": {
                "outputDataKeys": "mxpi_classpostprocessor0"
@@ -47,18 +56,18 @@
            "factory": "mxpi_dataserialize",
            "next": "appsink0"
        },
-  "appsrc1": {
-      "props": {
-          "blocksize": "409600"
-      },
-      "factory": "appsrc",
-      "next": "mxpi_imagedecoder0"
-  },
-  "appsink0": {
-      "props": {
-          "blocksize": "4096000"
-      },
-      "factory": "appsink"
-  }
-  }
+        "appsrc1": {
+           "props": {
+               "blocksize": "409600"
+           },
+           "factory": "appsrc",
+           "next": "mxpi_imagedecoder0"
+        },
+        "appsink0": {
+           "props": {
+               "blocksize": "4096000"
+           },
+           "factory": "appsink"
+        }
+    }
 }
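For reference, a stream defined like the pipeline above is normally driven from Python through the MindX SDK StreamManagerApi. A minimal sketch follows; the pipeline path, the sample image name, and the appsrc plugin id are assumptions to adapt (the pipeline declares "appsrc1", so older SDK versions may expect plugin id 1 rather than 0):

```python
from StreamManagerApi import StreamManagerApi, MxDataInput

stream_manager = StreamManagerApi()
if stream_manager.InitManager() != 0:
    raise RuntimeError("failed to initialize the stream manager")

# Load the stream definition shown in the diff above.
with open("pipeline/mobilenet.pipeline", "rb") as f:   # assumed path
    if stream_manager.CreateMultipleStreams(f.read()) != 0:
        raise RuntimeError("failed to create the stream")

# Feed one JPEG into the appsrc element and fetch the serialized result.
data_input = MxDataInput()
with open("test.jpg", "rb") as f:                      # hypothetical sample image
    data_input.data = f.read()

stream_name = b"im_resnet50"   # stream key from the pipeline above
in_plugin_id = 0               # assumption; may need to match "appsrc1"
unique_id = stream_manager.SendData(stream_name, in_plugin_id, data_input)

result = stream_manager.GetResult(stream_name, unique_id)
if result.errorCode == 0:
    print(result.data.decode())  # output of mxpi_dataserialize0
stream_manager.DestroyAllStreams()
```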
"next": "mxpi_imageresize0" - }, - "mxpi_imageresize0": { - "props": { - "handleMethod": "opencv", + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "handleMethod": "opencv", "resizeType": "Resizer_Stretch", - "resizeHeight": "336", - "resizeWidth": "336" - }, - "factory": "mxpi_imageresize", - "next": "mxpi_tensorinfer0" - }, - "mxpi_tensorinfer0": { - "props": { - "dataSource": "mxpi_imageresize0", - "modelPath": "../../convert/mobilenetv2.om", - "waitingTime": "2000", - "outputDeviceId": "-1" - }, - "factory": "mxpi_tensorinfer", - "next": "mxpi_classpostprocessor0" + "resizeHeight": "304", + "resizeWidth": "304" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_opencvcentercrop0" + }, + "mxpi_opencvcentercrop0": { + "props": { + "cropHeight":"304", + "cropWidth":"304" + }, + "factory":"mxpi_opencvcentercrop", + "next": "mxpi_tensorinfer0" + }, + + "mxpi_tensorinfer0": { + "props": { + "dataSource": "mxpi_imageresize0", + "modelPath":"./MobilenetV2_for_PyTorch_1.2.om", + "waitingTime": "2000", + "outputDeviceId": "-1" + }, + "factory": "mxpi_tensorinfer", + "next": "mxpi_classpostprocessor0" }, "mxpi_classpostprocessor0": { "props": { @@ -39,7 +48,7 @@ }, "factory": "mxpi_classpostprocessor", "next": "mxpi_dataserialize0" - }, + }, "mxpi_dataserialize0": { "props": { "outputDataKeys": "mxpi_classpostprocessor0" @@ -47,18 +56,18 @@ "factory": "mxpi_dataserialize", "next": "appsink0" }, - "appsrc1": { - "props": { - "blocksize": "409600" - }, - "factory": "appsrc", - "next": "mxpi_imagedecoder0" - }, - "appsink0": { - "props": { - "blocksize": "4096000" - }, - "factory": "appsink" - } - } + "appsrc1": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "mxpi_imagedecoder0" + }, + "appsink0": { + "props": { + "blocksize": "4096000" + }, + "factory": "appsink" + } + } } diff --git a/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/train/mobilenetv2_8p_main_anycard.py b/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/train/mobilenetv2_8p_main_anycard.py index d58384109a56f3d0b14be7c97b94c21560a7b1c7..18d7c2aeaf539e01218e8bd8665fa257e665221e 100644 --- a/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/train/mobilenetv2_8p_main_anycard.py +++ b/PyTorch/built-in/cv/classification/MobileNetV2_for_PyTorch/train/mobilenetv2_8p_main_anycard.py @@ -44,6 +44,7 @@ import torchvision.models as models from mobilenet import mobilenet_v2 import apex from apex import amp +from collections import OrderedDict from multi_epochs_dataloader import MultiEpochsDataLoader @@ -249,7 +250,8 @@ def main_worker(gpu, ngpus_per_node, args): weight_decay=args.weight_decay) if args.amp: - model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level, loss_scale=args.loss_scale,combine_grad=True) + model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level, + loss_scale=args.loss_scale, combine_grad=True) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], broadcast_buffers=False) @@ -260,7 +262,15 @@ def main_worker(gpu, ngpus_per_node, args): checkpoint = torch.load(args.resume, map_location=loc) args.start_epoch = checkpoint['epoch'] best_acc1 = checkpoint['best_acc1'] - model.load_state_dict(checkpoint['state_dict']) + state_dict = checkpoint['state_dict'] + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + if k[0: 7] == "module.": + name = k[7:] + else: + name = k[0:] + new_state_dict[name] = v + 
+            model.load_state_dict(new_state_dict)
             optimizer.load_state_dict(checkpoint['optimizer'])
             if args.amp:
                 amp.load_state_dict(checkpoint['amp'])
@@ -351,7 +361,7 @@ def train(train_loader, train_loader_len, model, criterion, optimizer, epoch, args):
         if args.graph_mode:
             print("graph mode on")
             torch.npu.enable_graph_mode()
-        if i > 200 :
+        if i > 200:
             pass
         # measure data loading time
         data_time.update(time.time() - end)
@@ -459,7 +469,7 @@ def validate(val_loader, model, criterion, args, ngpus_per_node):
         end = time.time()
         for i, (images, target) in enumerate(val_loader):
-            if i > 48 :
+            if i > 48:
                 pass
 
             target = target.to(torch.int32)
@@ -629,7 +639,8 @@ def get_pytorch_val_loader(data_path, batch_size, workers=5, _worker_init_fn=None,
         val_dataset, sampler=val_sampler,
         batch_size=batch_size, shuffle=(val_sampler is None),
-        num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True, collate_fn=fast_collate, drop_last=True)
+        num_workers=workers, worker_init_fn=_worker_init_fn,
+        pin_memory=True, collate_fn=fast_collate, drop_last=True)
 
     return val_loader
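The resume change above is needed because torch.nn.parallel.DistributedDataParallel registers every parameter under a "module." prefix, so a checkpoint saved from a wrapped model will not load into a bare one. The same key normalization can be factored into a small helper; a minimal sketch of the idea from the diff (the function name is illustrative, not part of the training script):

```python
from collections import OrderedDict

def strip_module_prefix(state_dict):
    """Drop the 'module.' prefix that DistributedDataParallel adds to
    parameter names; keys without the prefix pass through unchanged."""
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[len("module."):] if k.startswith("module.") else k
        new_state_dict[name] = v
    return new_state_dict

# Usage, mirroring the resume path in main_worker:
#   model.load_state_dict(strip_module_prefix(checkpoint['state_dict']))
```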