
!14620 add npu op mirror pad sec

From: @zhaozhenlong
Reviewed-by: @zhanghaibo5,@jpc_chenjianping
Signed-off-by: @zhanghaibo5
sec-icsl
mindspore-ci-bot committed 4 years ago
commit a86589e6e5
3 changed files with 71 additions and 23 deletions:
  1. mindspore/lite/src/runtime/kernel/npu/npu_kernel.h (+3, -3)
  2. mindspore/lite/src/runtime/kernel/npu/pad_npu.cc (+65, -20)
  3. mindspore/lite/src/runtime/kernel/npu/pad_npu.h (+3, -0)

mindspore/lite/src/runtime/kernel/npu/npu_kernel.h (+3, -3)

@@ -54,13 +54,13 @@ kernel::LiteKernel *NPUKernelCreator(const std::vector<lite::Tensor *> &inputs,
                                      const lite::InnerContext *ctx, const kernel::KernelKey &desc,
                                      const mindspore::lite::PrimitiveC *primitive) {
   if (!primitive->infer_flag()) {
-    MS_LOG(ERROR) << "NPU does not support runtime inference shape. Type is:"
-                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()));
+    MS_LOG(WARNING) << "NPU does not support runtime inference shape. Type is:"
+                    << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()));
     free(opParameter);
     return nullptr;
   }
   if (inputs[0]->shape().size() > 4) {
-    MS_LOG(ERROR) << "Npu does not support input tensor dims greater than 4";
+    MS_LOG(WARNING) << "Npu does not support input tensor dims greater than 4";
     free(opParameter);
     return nullptr;
   }
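The two log downgrades above (ERROR to WARNING) reflect that returning nullptr from NPUKernelCreator is a recoverable condition rather than a failure: the caller can presumably schedule the op on another backend such as the CPU. A minimal, self-contained sketch of that pattern (not MindSpore source; Kernel, TryCreateNpuKernel, and CreateKernel are illustrative names):

#include <cstdio>

struct Kernel { const char *backend; };

// Hypothetical creator: nullptr means "not supported here", not "failed".
Kernel *TryCreateNpuKernel(int input_dims) {
  if (input_dims > 4) {
    std::fprintf(stderr, "WARNING: NPU does not support >4-D input, falling back\n");
    return nullptr;
  }
  static Kernel npu{"NPU"};
  return &npu;
}

// The caller recovers by picking another backend, which is why the log
// above stays a warning rather than an error.
Kernel *CreateKernel(int input_dims) {
  Kernel *k = TryCreateNpuKernel(input_dims);
  if (k == nullptr) {
    static Kernel cpu{"CPU"};
    k = &cpu;
  }
  return k;
}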


mindspore/lite/src/runtime/kernel/npu/pad_npu.cc (+65, -20)

@@ -25,8 +25,30 @@ using mindspore::schema::PrimitiveType_Pad;
 namespace mindspore::kernel {
 int PadNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                             OpParameter *opParameter) {
-  if (pad_->GetPaddingMode() != schema::PaddingMode_CONSTANT) {
-    MS_LOG(WARNING) << "NPU only support CONSTANT padding mode";
+  if (inputs.size() == 2) {
+    if (inputs[1]->data_c() == nullptr && inputs[1]->ElementsNum() != 8) {
+      MS_LOG(ERROR) << "pad input 2 nullptr or paddings size " << inputs[1]->ElementsNum() << " unsupported";
+      return RET_ERROR;
+    }
+    for (int i = 0; i < inputs[1]->ElementsNum(); i++) {
+      param_->paddings_[i] = static_cast<int *>(inputs[1]->data_c())[i];
+    }
+  } else if (inputs.size() == 1) {
+    if (pad_->GetPaddings().size() != 8) {
+      MS_LOG(WARNING) << "NPU only support paddings size 8";
+      return RET_ERROR;
+    }
+    for (int i = 0; i < pad_->GetPaddings().size(); i++) {
+      param_->paddings_[i] = pad_->GetPaddings()[i];
+    }
+  } else {
+    MS_LOG(ERROR) << "pad input size " << inputs.size() << " not supported";
+    return RET_ERROR;
+  }
+  if (pad_->GetPaddingMode() != schema::PaddingMode_CONSTANT &&
+      pad_->GetPaddingMode() != schema::PaddingMode_SYMMETRIC &&
+      pad_->GetPaddingMode() != schema::PaddingMode_REFLECT) {
+    MS_LOG(ERROR) << "pad npu not support mode " << pad_->GetPaddingMode();
     return RET_ERROR;
   }
   return RET_OK;
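The rewritten IsSupport accepts the paddings either from a second input tensor (inputs.size() == 2) or from the primitive attribute (inputs.size() == 1), and in both cases requires exactly 8 values: a 4 x 2 table of (before, after) pairs, one pair per dimension of a 4-D tensor, cached in param_->paddings_. A self-contained sketch of that contract (not MindSpore source; the names are illustrative, and this sketch rejects either a null buffer or a wrong count, using ||):

#include <array>

constexpr int kPadDims = 4;                      // highest rank the NPU path accepts
using Paddings = std::array<int, kPadDims * 2>;  // mirrors PadParameter::paddings_

// Copy paddings supplied as a flat int buffer (e.g. a second input tensor).
bool CopyPaddings(const int *data, int element_num, Paddings *out) {
  if (data == nullptr || element_num != kPadDims * 2) {
    return false;  // reject a missing buffer or any size other than 4 x 2
  }
  for (int i = 0; i < element_num; i++) {
    (*out)[i] = data[i];
  }
  return true;
}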
@@ -34,39 +34,62 @@ int PadNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
 
 int PadNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                const std::vector<ge::Operator *> &npu_inputs) {
-  op_ = new (std::nothrow) hiai::op::PadV2(name_);
-  if (op_ == nullptr) {
-    MS_LOG(ERROR) << name_ << " op is nullptr";
-    return RET_ERROR;
-  }
-  int size = static_cast<int>(pad_->GetPaddings().size() / 2);
-  ge::TensorDesc padding_tensor_desc(ge::Shape({size, 2}), ge::FORMAT_NCHW, ge::DT_INT32);
+  ge::TensorDesc padding_tensor_desc(ge::Shape({4, 2}), ge::FORMAT_NCHW, ge::DT_INT32);
   ge::TensorPtr padding_tensor = std::make_shared<hiai::Tensor>(padding_tensor_desc);
-  padding_tensor->SetData(reinterpret_cast<uint8_t *>(pad_->GetPaddings().data()), 2 * size * sizeof(int));
+  padding_tensor->SetData(reinterpret_cast<uint8_t *>(param_->paddings_), 8 * sizeof(int));
   paddings_ = new hiai::op::Const(name_ + "paddings");
   paddings_->set_attr_value(padding_tensor);
+  if (pad_->GetPaddingMode() == schema::PaddingMode_CONSTANT) {
+    op_ = new (std::nothrow) hiai::op::PadV2(name_);
+    if (op_ == nullptr) {
+      MS_LOG(ERROR) << name_ << " op is nullptr";
+      return RET_ERROR;
+    }
 
-  ge::TensorDesc constant_values_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_FLOAT);
-  ge::TensorPtr constant_values_tensor = std::make_shared<hiai::Tensor>(constant_values_tensor_desc);
-  vector<float> constant_values_data_value = {pad_->GetConstantValue()};
-  constant_values_tensor->SetData(reinterpret_cast<uint8_t *>(constant_values_data_value.data()), 1 * sizeof(float));
-  constant_ = new hiai::op::Const(name_ + "constant");
-  constant_->set_attr_value(constant_values_tensor);
+    ge::TensorDesc constant_values_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_FLOAT);
+    ge::TensorPtr constant_values_tensor = std::make_shared<hiai::Tensor>(constant_values_tensor_desc);
+    vector<float> constant_values_data_value = {pad_->GetConstantValue()};
+    constant_values_tensor->SetData(reinterpret_cast<uint8_t *>(constant_values_data_value.data()), 1 * sizeof(float));
+    constant_ = new hiai::op::Const(name_ + "constant");
+    constant_->set_attr_value(constant_values_tensor);
 
-  op_->set_input_x(*npu_inputs[0]);
-  op_->set_input_constant_values(*constant_);
-  op_->set_input_paddings(*paddings_);
+    op_->set_input_x(*npu_inputs[0]);
+    op_->set_input_constant_values(*constant_);
+    op_->set_input_paddings(*paddings_);
+  } else {
+    mirror_op_ = new (std::nothrow) hiai::op::MirrorPad(name_);
+    if (mirror_op_ == nullptr) {
+      MS_LOG(ERROR) << name_ << " op is nullptr";
+      return RET_ERROR;
+    }
+    mirror_op_->set_input_x(*npu_inputs[0]);
+    mirror_op_->set_input_paddings(*paddings_);
+    if (pad_->GetPaddingMode() == schema::PaddingMode_SYMMETRIC) {
+      mirror_op_->set_attr_mode("SYMMETRIC");
+    } else {
+      mirror_op_->set_attr_mode("REFLECT");
+    }
+  }
 
   return RET_OK;
 }

-ge::Operator *mindspore::kernel::PadNPUKernel::GetNPUOp() { return this->op_; }
+ge::Operator *mindspore::kernel::PadNPUKernel::GetNPUOp() {
+  if (pad_->GetPaddingMode() == schema::PaddingMode_CONSTANT) {
+    return op_;
+  }
+  return mirror_op_;
+}

 PadNPUKernel::~PadNPUKernel() {
   if (op_ != nullptr) {
     delete op_;
     op_ = nullptr;
   }
+  if (mirror_op_ != nullptr) {
+    delete mirror_op_;
+    mirror_op_ = nullptr;
+  }
   if (paddings_ != nullptr) {
     delete paddings_;
     paddings_ = nullptr;
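In SetNPUInputs the shared paddings constant now feeds one of two HiAI operators: hiai::op::PadV2 (which also takes the constant fill value) for CONSTANT mode, and hiai::op::MirrorPad with the string attribute "SYMMETRIC" or "REFLECT" otherwise. GetNPUOp then returns whichever operator was built, and the destructor releases both candidates. A compact sketch of that selection logic (not MindSpore source; the enum and struct are illustrative stand-ins):

enum class PaddingMode { kConstant, kSymmetric, kReflect };

struct PadOps {
  void *pad_v2 = nullptr;      // stand-in for hiai::op::PadV2 (CONSTANT mode)
  void *mirror_pad = nullptr;  // stand-in for hiai::op::MirrorPad (mirror modes)
};

// Mirrors GetNPUOp(): exactly one operator is ever constructed per kernel,
// and the padding mode decides which one joins the NPU graph.
void *SelectNpuOp(const PadOps &ops, PaddingMode mode) {
  return mode == PaddingMode::kConstant ? ops.pad_v2 : ops.mirror_pad;
}

// Mirrors the set_attr_mode() call: MirrorPad's mode is a string attribute.
const char *MirrorPadAttrMode(PaddingMode mode) {
  return mode == PaddingMode::kSymmetric ? "SYMMETRIC" : "REFLECT";
}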


mindspore/lite/src/runtime/kernel/npu/pad_npu.h (+3, -0)

@@ -29,6 +29,7 @@ class PadNPUKernel : public NPUKernel {
              const mindspore::lite::PrimitiveC *primitive)
       : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
     pad_ = reinterpret_cast<const mindspore::lite::Pad *>(primitive);
+    param_ = reinterpret_cast<PadParameter *>(op_parameter_);
   }
   ~PadNPUKernel() override;

@@ -40,6 +41,8 @@ class PadNPUKernel : public NPUKernel {
 
  private:
   hiai::op::PadV2 *op_ = nullptr;
+  hiai::op::MirrorPad *mirror_op_ = nullptr;
+  PadParameter *param_;
   hiai::op::Const *paddings_ = nullptr;
   hiai::op::Const *constant_ = nullptr;
   const mindspore::lite::Pad *pad_;
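Only one of op_ and mirror_op_ is ever allocated for a given kernel, which is why the destructor null-checks each pointer before deleting it. As a design aside (an alternative sketch, not the committed code), holding the operators in std::unique_ptr would make that cleanup implicit:

#include <memory>

struct PadV2 {};      // placeholder for hiai::op::PadV2
struct MirrorPad {};  // placeholder for hiai::op::MirrorPad

// Hypothetical alternative member layout: unique_ptr releases whichever
// operator was allocated, so no hand-written delete-and-null destructor.
struct PadKernelMembers {
  std::unique_ptr<PadV2> op;             // set only in CONSTANT mode
  std::unique_ptr<MirrorPad> mirror_op;  // set only in SYMMETRIC/REFLECT mode
};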

