From 5b37f667efc167632566d9adfa2643516291e8ee Mon Sep 17 00:00:00 2001
From: zhaozhenlong
Date: Wed, 31 Mar 2021 17:37:01 +0800
Subject: [PATCH] enhance update on gate

---
 .../lite/src/runtime/kernel/npu/argmax_npu.cc | 76 +++++++++++++++++++
 .../lite/src/runtime/kernel/npu/argmax_npu.h  | 49 ++++++++++++
 .../src/runtime/kernel/npu/reshape_npu.cc     | 40 ++++++----
 .../lite/src/runtime/kernel/npu/reshape_npu.h |  2 +-
 mindspore/lite/test/models_npu.cfg            |  1 +
 mindspore/lite/test/models_onnx.cfg           |  1 +
 6 files changed, 155 insertions(+), 14 deletions(-)
 create mode 100644 mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc
 create mode 100644 mindspore/lite/src/runtime/kernel/npu/argmax_npu.h

diff --git a/mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc b/mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc
new file mode 100644
index 0000000000..a55e9e20ff
--- /dev/null
+++ b/mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc
@@ -0,0 +1,76 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/runtime/kernel/npu/argmax_npu.h"
+#include <memory>
+#include "include/graph/op/all_ops.h"
+#include "src/kernel_registry.h"
+#include "src/runtime/agent/npu/npu_converter_utils.h"
+
+using mindspore::kernel::KERNEL_ARCH::kNPU;
+using mindspore::lite::KernelRegistrar;
+using mindspore::schema::PrimitiveType_ArgMax;
+
+namespace mindspore::kernel {
+int ArgmaxNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
+                               OpParameter *opParameter) {
+  return RET_OK;
+}
+
+int ArgmaxNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
+                                  const std::vector<ge::Operator *> &npu_inputs) {
+  op_ = new (std::nothrow) hiai::op::ArgMaxExt2(name_);
+  if (op_ == nullptr) {
+    MS_LOG(ERROR) << "New argmax npu operator for " << name_ << " failed.";
+    return RET_ERROR;
+  }
+  op_->set_input_x(*npu_inputs[0]);
+  axis_const_ = new (std::nothrow) hiai::op::Const(name_ + "_axis");
+  if (axis_const_ == nullptr) {
+    MS_LOG(ERROR) << "New weight const failed.";
+    return RET_ERROR;
+  }
+  ge::TensorDesc tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_INT32);
+  std::shared_ptr<ge::Tensor> ge_tensor =
+    std::make_shared<ge::Tensor>(tensor_desc, reinterpret_cast<uint8_t *>(&(param_->axis_)), sizeof(int));
+  if (ge_tensor == nullptr) {
+    MS_LOG(ERROR) << "new ge_tensor failed.";
+    return RET_ERROR;
+  }
+  axis_const_->set_attr_value(ge_tensor);
+  op_->set_input_axis(*axis_const_);
+  op_->set_attr_keep_dims(param_->keep_dims_);
+  op_->set_attr_outmaxval(param_->out_value_);
+  op_->set_attr_topk(param_->topk_);
+
+  return RET_OK;
+}
+
+ge::Operator *mindspore::kernel::ArgmaxNPUKernel::GetNPUOp() { return op_; }
+
+ArgmaxNPUKernel::~ArgmaxNPUKernel() {
+  if (op_ != nullptr) {
+    delete op_;
+    op_ = nullptr;
+  }
+  if (axis_const_ != nullptr) {
+    delete axis_const_;
+    axis_const_ = nullptr;
+  }
+}
+
+REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_ArgMax, NPUKernelCreator<ArgmaxNPUKernel>)
+}  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/npu/argmax_npu.h b/mindspore/lite/src/runtime/kernel/npu/argmax_npu.h
new file mode 100644
index 0000000000..639209f066
--- /dev/null
+++ b/mindspore/lite/src/runtime/kernel/npu/argmax_npu.h
@@ -0,0 +1,49 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARGMAX_NPU_H_
+#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARGMAX_NPU_H_
+
+#include <vector>
+#include "include/graph/op/all_ops.h"
+#include "include/graph/compatible/all_ops.h"
+#include "src/runtime/kernel/npu/npu_kernel.h"
+#include "nnacl/arg_min_max_parameter.h"
+
+namespace mindspore::kernel {
+class ArgmaxNPUKernel : public NPUKernel {
+ public:
+  ArgmaxNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                  const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
+                  const mindspore::lite::PrimitiveC *primitive)
+      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
+    param_ = reinterpret_cast<ArgMinMaxParameter *>(parameter);
+  }
+  ~ArgmaxNPUKernel() override;
+
+  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
+                OpParameter *opParameter) override;
+  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
+                   const std::vector<ge::Operator *> &npu_inputs) override;
+  ge::Operator *GetNPUOp() override;
+
+ private:
+  hiai::op::ArgMaxExt2 *op_ = nullptr;
+  hiai::op::Const *axis_const_ = nullptr;
+  ArgMinMaxParameter *param_;
+};
+}  // namespace mindspore::kernel
+#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARGMAX_NPU_H_
diff --git a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc
index 20033f6e41..5ae39f4384 100644
--- a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc
+++ b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -24,8 +24,18 @@ using mindspore::schema::PrimitiveType_Reshape;
 namespace mindspore::kernel {
 int ReshapeNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                 OpParameter *opParameter) {
-  if (reshape_param_->shape_dim_ == 0) {
-    MS_LOG(ERROR) << "Npu reshape op only supports const shape.";
+  if (inputs.size() == 1 && reshape_param_->shape_dim_ == 0) {
+    MS_LOG(WARNING) << "Npu reshape op only supports const shape.";
+    return RET_ERROR;
+  }
+  if (inputs.size() == 2) {
+    auto shape_tensor = inputs.at(1);
+    if (!shape_tensor->IsConst()) {
+      MS_LOG(WARNING) << "Npu reshape op only supports const shape.";
+      return RET_ERROR;
+    }
+  } else if (inputs.size() != 1) {
+    MS_LOG(ERROR) << "npu reshape input size should be 1 or 2, got " << inputs.size();
     return RET_ERROR;
   }
   return RET_OK;
@@ -40,17 +50,21 @@ int ReshapeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
     return RET_ERROR;
   }
   op_->set_input_x(*npu_inputs[0]);
-
-  shape_op_ = new (std::nothrow) hiai::op::Const(name_ + "_shape");
-  std::vector<int> shape;
-  for (int i = 0; i < reshape_param_->shape_dim_; i++) {
-    shape.push_back(reshape_param_->shape_[i]);
+  if (inputs.size() == 1) {
+    shape_op_ = new (std::nothrow) hiai::op::Const(name_ + "_shape");
+    std::vector<int> shape;
+    for (int i = 0; i < reshape_param_->shape_dim_; i++) {
+      shape.push_back(reshape_param_->shape_[i]);
+    }
+    ge::TensorDesc shape_tensor_desc(ge::Shape({reshape_param_->shape_dim_}), ge::FORMAT_NCHW, ge::DT_INT32);
+    ge::TensorPtr ai_shape_tensor = std::make_shared<ge::Tensor>(shape_tensor_desc);
+    ai_shape_tensor->SetData(reinterpret_cast<uint8_t *>(shape.data()), reshape_param_->shape_dim_ * sizeof(int32_t));
+    shape_op_->set_attr_value(ai_shape_tensor);
+    op_->set_input_shape(*shape_op_);
+  } else {
+    op_->set_input_shape(*npu_inputs[1]);
   }
-  ge::TensorDesc shape_tensor_desc(ge::Shape({reshape_param_->shape_dim_}), ge::FORMAT_NCHW, ge::DT_INT32);
-  ge::TensorPtr ai_shape_tensor = std::make_shared<ge::Tensor>(shape_tensor_desc);
-  ai_shape_tensor->SetData(reinterpret_cast<uint8_t *>(shape.data()), reshape_param_->shape_dim_ * sizeof(int32_t));
-  shape_op_->set_attr_value(ai_shape_tensor);
-  op_->set_input_shape(*shape_op_);
+
   return RET_OK;
 }
 
diff --git a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.h b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.h
index 1543419811..48265f8388 100644
--- a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.h
+++ b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/mindspore/lite/test/models_npu.cfg b/mindspore/lite/test/models_npu.cfg
index 41224e034a..f588f1b3aa 100644
--- a/mindspore/lite/test/models_npu.cfg
+++ b/mindspore/lite/test/models_npu.cfg
@@ -76,3 +76,4 @@ ml_video_edit_img_segment_adaptise_pb2tflite.tflite 0.5 2
 hdc_age_medium 504
 gts_detect_5k_tf115.tflite 319
 ml_video_edit_art_transfer.onnx 3 3
+ml_video_edit_enhance_update.onnx 0.5
diff --git a/mindspore/lite/test/models_onnx.cfg b/mindspore/lite/test/models_onnx.cfg
index 366cf1c0a1..0cd2270b63 100644
--- a/mindspore/lite/test/models_onnx.cfg
+++ b/mindspore/lite/test/models_onnx.cfg
@@ -56,3 +56,4 @@ hdc_ocr_detect.onnx
 ml_edu_kit_hand_detection.onnx
 ml_edu_kit_hand_key_position.onnx
 ml_video_edit_art_generate.onnx
+ml_video_edit_enhance_update.onnx