From 011c7508c503d828c29d719bf995cb911164e924 Mon Sep 17 00:00:00 2001 From: zhaozhenlong Date: Fri, 2 Apr 2021 16:44:24 +0800 Subject: [PATCH] add npu op mirror pad for art gene --- .../lite/src/runtime/kernel/npu/npu_kernel.h | 6 +- .../lite/src/runtime/kernel/npu/pad_npu.cc | 85 ++++++++++++++----- .../lite/src/runtime/kernel/npu/pad_npu.h | 3 + 3 files changed, 71 insertions(+), 23 deletions(-) diff --git a/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h b/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h index 92652603f1..6046180790 100644 --- a/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h +++ b/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h @@ -54,13 +54,13 @@ kernel::LiteKernel *NPUKernelCreator(const std::vector &inputs, const lite::InnerContext *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { if (!primitive->infer_flag()) { - MS_LOG(ERROR) << "NPU does not support runtime inference shape. Type is:" - << schema::EnumNamePrimitiveType(static_cast(primitive->Type())); + MS_LOG(WARNING) << "NPU does not support runtime inference shape. 
Type is:" + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type())); free(opParameter); return nullptr; } if (inputs[0]->shape().size() > 4) { - MS_LOG(ERROR) << "Npu does not support input tensor dims greater than 4"; + MS_LOG(WARNING) << "Npu does not support input tensor dims greater than 4"; free(opParameter); return nullptr; } diff --git a/mindspore/lite/src/runtime/kernel/npu/pad_npu.cc b/mindspore/lite/src/runtime/kernel/npu/pad_npu.cc index 45f0979984..7e3669b545 100644 --- a/mindspore/lite/src/runtime/kernel/npu/pad_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/pad_npu.cc @@ -25,8 +25,30 @@ using mindspore::schema::PrimitiveType_Pad; namespace mindspore::kernel { int PadNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter) { - if (pad_->GetPaddingMode() != schema::PaddingMode_CONSTANT) { - MS_LOG(WARNING) << "NPU only support CONSTANT padding mode"; + if (inputs.size() == 2) { + if (inputs[1]->data_c() == nullptr || inputs[1]->ElementsNum() != 8) { + MS_LOG(ERROR) << "pad input 2 nullptr or paddings size " << inputs[1]->ElementsNum() << " unsupported"; + return RET_ERROR; + } + for (int i = 0; i < inputs[1]->ElementsNum(); i++) { + param_->paddings_[i] = static_cast<int *>(inputs[1]->data_c())[i]; + } + } else if (inputs.size() == 1) { + if (pad_->GetPaddings().size() != 8) { + MS_LOG(WARNING) << "NPU only support paddings size 8"; + return RET_ERROR; + } + for (int i = 0; i < pad_->GetPaddings().size(); i++) { + param_->paddings_[i] = pad_->GetPaddings()[i]; + } + } else { + MS_LOG(ERROR) << "pad input size " << inputs.size() << " not supported"; + return RET_ERROR; + } + if (pad_->GetPaddingMode() != schema::PaddingMode_CONSTANT && + pad_->GetPaddingMode() != schema::PaddingMode_SYMMETRIC && + pad_->GetPaddingMode() != schema::PaddingMode_REFLECT) { + MS_LOG(ERROR) << "pad npu not support mode " << pad_->GetPaddingMode(); return RET_ERROR; } return RET_OK; @@ -34,39 +56,62 @@ int 
PadNPUKernel::IsSupport(const std::vector &inputs, const std int PadNPUKernel::SetNPUInputs(const std::vector &inputs, const std::vector &outputs, const std::vector &npu_inputs) { - op_ = new (std::nothrow) hiai::op::PadV2(name_); - if (op_ == nullptr) { - MS_LOG(ERROR) << name_ << " op is nullptr"; - return RET_ERROR; - } - int size = static_cast(pad_->GetPaddings().size() / 2); - ge::TensorDesc padding_tensor_desc(ge::Shape({size, 2}), ge::FORMAT_NCHW, ge::DT_INT32); + ge::TensorDesc padding_tensor_desc(ge::Shape({4, 2}), ge::FORMAT_NCHW, ge::DT_INT32); ge::TensorPtr padding_tensor = std::make_shared(padding_tensor_desc); - padding_tensor->SetData(reinterpret_cast(pad_->GetPaddings().data()), 2 * size * sizeof(int)); + padding_tensor->SetData(reinterpret_cast(param_->paddings_), 8 * sizeof(int)); paddings_ = new hiai::op::Const(name_ + "paddings"); paddings_->set_attr_value(padding_tensor); + if (pad_->GetPaddingMode() == schema::PaddingMode_CONSTANT) { + op_ = new (std::nothrow) hiai::op::PadV2(name_); + if (op_ == nullptr) { + MS_LOG(ERROR) << name_ << " op is nullptr"; + return RET_ERROR; + } - ge::TensorDesc constant_values_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_FLOAT); - ge::TensorPtr constant_values_tensor = std::make_shared(constant_values_tensor_desc); - vector constant_values_data_value = {pad_->GetConstantValue()}; - constant_values_tensor->SetData(reinterpret_cast(constant_values_data_value.data()), 1 * sizeof(float)); - constant_ = new hiai::op::Const(name_ + "constant"); - constant_->set_attr_value(constant_values_tensor); + ge::TensorDesc constant_values_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_FLOAT); + ge::TensorPtr constant_values_tensor = std::make_shared(constant_values_tensor_desc); + vector constant_values_data_value = {pad_->GetConstantValue()}; + constant_values_tensor->SetData(reinterpret_cast(constant_values_data_value.data()), 1 * sizeof(float)); + constant_ = new hiai::op::Const(name_ + "constant"); + 
constant_->set_attr_value(constant_values_tensor); - op_->set_input_x(*npu_inputs[0]); - op_->set_input_constant_values(*constant_); - op_->set_input_paddings(*paddings_); + op_->set_input_x(*npu_inputs[0]); + op_->set_input_constant_values(*constant_); + op_->set_input_paddings(*paddings_); + } else { + mirror_op_ = new (std::nothrow) hiai::op::MirrorPad(name_); + if (mirror_op_ == nullptr) { + MS_LOG(ERROR) << name_ << " op is nullptr"; + return RET_ERROR; + } + mirror_op_->set_input_x(*npu_inputs[0]); + mirror_op_->set_input_paddings(*paddings_); + if (pad_->GetPaddingMode() == schema::PaddingMode_SYMMETRIC) { + mirror_op_->set_attr_mode("SYMMETRIC"); + } else { + mirror_op_->set_attr_mode("REFLECT"); + } + } return RET_OK; } -ge::Operator *mindspore::kernel::PadNPUKernel::GetNPUOp() { return this->op_; } +ge::Operator *mindspore::kernel::PadNPUKernel::GetNPUOp() { + if (pad_->GetPaddingMode() == schema::PaddingMode_CONSTANT) { + return op_; + } + return mirror_op_; +} PadNPUKernel::~PadNPUKernel() { if (op_ != nullptr) { delete op_; op_ = nullptr; } + if (mirror_op_ != nullptr) { + delete mirror_op_; + mirror_op_ = nullptr; + } if (paddings_ != nullptr) { delete paddings_; paddings_ = nullptr; diff --git a/mindspore/lite/src/runtime/kernel/npu/pad_npu.h b/mindspore/lite/src/runtime/kernel/npu/pad_npu.h index 0d445be97d..759d214b63 100644 --- a/mindspore/lite/src/runtime/kernel/npu/pad_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/pad_npu.h @@ -29,6 +29,7 @@ class PadNPUKernel : public NPUKernel { const mindspore::lite::PrimitiveC *primitive) : NPUKernel(parameter, inputs, outputs, ctx, primitive) { pad_ = reinterpret_cast(primitive); + param_ = reinterpret_cast(op_parameter_); } ~PadNPUKernel() override; @@ -40,6 +41,8 @@ class PadNPUKernel : public NPUKernel { private: hiai::op::PadV2 *op_ = nullptr; + hiai::op::MirrorPad *mirror_op_ = nullptr; + PadParameter *param_; hiai::op::Const *paddings_ = nullptr; hiai::op::Const *constant_ = nullptr; const 
mindspore::lite::Pad *pad_;