
!14497 enhance_update net on gate

From: @zhaozhenlong
Reviewed-by: 
Signed-off-by:
pull/14497/MERGE
mindspore-ci-bot committed 4 years ago
commit e4bfb4439a
6 changed files with 155 additions and 14 deletions:
  1. mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc (+76, -0)
  2. mindspore/lite/src/runtime/kernel/npu/argmax_npu.h (+49, -0)
  3. mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc (+27, -13)
  4. mindspore/lite/src/runtime/kernel/npu/reshape_npu.h (+1, -1)
  5. mindspore/lite/test/models_npu.cfg (+1, -0)
  6. mindspore/lite/test/models_onnx.cfg (+1, -0)

mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc (+76, -0)

@@ -0,0 +1,76 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "src/runtime/kernel/npu/argmax_npu.h"
#include <memory>
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"

using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_ArgMax;

namespace mindspore::kernel {
int ArgmaxNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                               OpParameter *opParameter) {
  return RET_OK;
}

int ArgmaxNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                  const std::vector<ge::Operator *> &npu_inputs) {
  op_ = new (std::nothrow) hiai::op::ArgMaxExt2(name_);
  if (op_ == nullptr) {
    MS_LOG(ERROR) << "New argmax npu operator for " << name_ << " failed.";
    return RET_ERROR;
  }
  op_->set_input_x(*npu_inputs[0]);
  axis_const_ = new (std::nothrow) hiai::op::Const(name_ + "_axis");
  if (axis_const_ == nullptr) {
    MS_LOG(ERROR) << "New weight const failed.";
    return RET_ERROR;
  }
  ge::TensorDesc tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_INT32);
  std::shared_ptr<ge::Tensor> ge_tensor =
      std::make_shared<ge::Tensor>(tensor_desc, reinterpret_cast<const uint8_t *>(&(param_->axis_)), sizeof(int));
  if (ge_tensor == nullptr) {
    MS_LOG(ERROR) << "new ge_tensor failed.";
    return RET_ERROR;
  }
  axis_const_->set_attr_value(ge_tensor);
  op_->set_input_axis(*axis_const_);
  op_->set_attr_keep_dims(param_->keep_dims_);
  op_->set_attr_outmaxval(param_->out_value_);
  op_->set_attr_topk(param_->topk_);

  return RET_OK;
}

ge::Operator *mindspore::kernel::ArgmaxNPUKernel::GetNPUOp() { return op_; }

ArgmaxNPUKernel::~ArgmaxNPUKernel() {
  if (op_ != nullptr) {
    delete op_;
    op_ = nullptr;
  }
  if (axis_const_ != nullptr) {
    delete axis_const_;
    axis_const_ = nullptr;
  }
}

REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_ArgMax, NPUKernelCreator<ArgmaxNPUKernel>)
} // namespace mindspore::kernel
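
For readers skimming the new kernel, the one non-obvious step above is how the scalar axis reaches the NPU graph: the kernel exposes param_->axis_ as the raw bytes of a one-element DT_INT32 tensor, hands that tensor to a hiai::op::Const, and wires the Const into ArgMaxExt2's axis input. The standalone sketch below (plain C++, no HiAI DDK; the helper name PackScalarToBytes is illustrative, not part of MindSpore) shows that byte-packing round trip:

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Hypothetical helper: expose a scalar as the raw byte buffer of a
// one-element int32 tensor, the same layout the Const op receives above.
std::vector<uint8_t> PackScalarToBytes(int32_t value) {
  std::vector<uint8_t> buffer(sizeof(int32_t));
  std::memcpy(buffer.data(), &value, sizeof(int32_t));
  return buffer;
}

int main() {
  int32_t axis = 1;  // stands in for ArgMinMaxParameter::axis_
  std::vector<uint8_t> data = PackScalarToBytes(axis);

  // Reading the bytes back recovers the axis, which is what the NPU graph
  // ultimately sees as the shape-{1} DT_INT32 const tensor.
  int32_t round_trip = 0;
  std::memcpy(&round_trip, data.data(), sizeof(int32_t));
  std::cout << "axis tensor holds " << round_trip << std::endl;
  return 0;
}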

mindspore/lite/src/runtime/kernel/npu/argmax_npu.h (+49, -0)

@@ -0,0 +1,49 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARGMAX_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARGMAX_NPU_H_

#include <vector>
#include "include/graph/op/all_ops.h"
#include "include/graph/compatible/all_ops.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "nnacl/arg_min_max_parameter.h"

namespace mindspore::kernel {
class ArgmaxNPUKernel : public NPUKernel {
 public:
  ArgmaxNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                  const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                  const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
    param_ = reinterpret_cast<ArgMinMaxParameter *>(parameter);
  }
  ~ArgmaxNPUKernel() override;

  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  hiai::op::ArgMaxExt2 *op_ = nullptr;
  hiai::op::Const *axis_const_ = nullptr;
  ArgMinMaxParameter *param_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARGMAX_NPU_H_

mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc (+27, -13)

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -24,8 +24,18 @@ using mindspore::schema::PrimitiveType_Reshape;
 namespace mindspore::kernel {
 int ReshapeNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                 OpParameter *opParameter) {
-  if (reshape_param_->shape_dim_ == 0) {
-    MS_LOG(ERROR) << "Npu reshape op only supports const shape.";
+  if (inputs.size() == 1 && reshape_param_->shape_dim_ == 0) {
+    MS_LOG(WARNING) << "Npu reshape op only supports const shape.";
     return RET_ERROR;
   }
+  if (inputs.size() == 2) {
+    auto shape_tensor = inputs.at(1);
+    if (!shape_tensor->IsConst()) {
+      MS_LOG(WARNING) << "Npu reshape op only supports const shape.";
+      return RET_ERROR;
+    }
+  } else {
+    MS_LOG(ERROR) << "npu reshape input size should be 1 or 2, got " << inputs.size();
+    return RET_ERROR;
+  }
   return RET_OK;
@@ -40,17 +50,21 @@ int ReshapeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
     return RET_ERROR;
   }
   op_->set_input_x(*npu_inputs[0]);

-  shape_op_ = new (std::nothrow) hiai::op::Const(name_ + "_shape");
-  std::vector<int> shape;
-  for (int i = 0; i < reshape_param_->shape_dim_; i++) {
-    shape.push_back(reshape_param_->shape_[i]);
+  if (inputs.size() == 1) {
+    shape_op_ = new (std::nothrow) hiai::op::Const(name_ + "_shape");
+    std::vector<int> shape;
+    for (int i = 0; i < reshape_param_->shape_dim_; i++) {
+      shape.push_back(reshape_param_->shape_[i]);
+    }
+    ge::TensorDesc shape_tensor_desc(ge::Shape({reshape_param_->shape_dim_}), ge::FORMAT_NCHW, ge::DT_INT32);
+    ge::TensorPtr ai_shape_tensor = std::make_shared<hiai::Tensor>(shape_tensor_desc);
+    ai_shape_tensor->SetData(reinterpret_cast<uint8_t *>(shape.data()), reshape_param_->shape_dim_ * sizeof(int32_t));
+    shape_op_->set_attr_value(ai_shape_tensor);
+    op_->set_input_shape(*shape_op_);
+  } else {
+    op_->set_input_shape(*npu_inputs[1]);
   }
-  ge::TensorDesc shape_tensor_desc(ge::Shape({reshape_param_->shape_dim_}), ge::FORMAT_NCHW, ge::DT_INT32);
-  ge::TensorPtr ai_shape_tensor = std::make_shared<hiai::Tensor>(shape_tensor_desc);
-  ai_shape_tensor->SetData(reinterpret_cast<uint8_t *>(shape.data()), reshape_param_->shape_dim_ * sizeof(int32_t));
-  shape_op_->set_attr_value(ai_shape_tensor);
-  op_->set_input_shape(*shape_op_);

   return RET_OK;
 }
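
Read together, the two hunks above say the NPU reshape now accepts its target shape from either source, as long as it is constant at conversion time: a single-input reshape must carry the shape in the reshape parameter, and a two-input reshape must have a const shape tensor as the second input. The standalone sketch below condenses that rule; FakeTensor and FakeReshapeParam are stand-in types for illustration, not the real lite::Tensor or parameter structs, and it returns bool instead of RET_OK/RET_ERROR:

#include <iostream>
#include <vector>

// Stand-in types, illustrative only.
struct FakeTensor {
  bool is_const;
  bool IsConst() const { return is_const; }
};
struct FakeReshapeParam {
  int shape_dim_;  // 0 means no shape folded into the parameter
};

// Condensed version of the support rule enforced by the change above.
bool ReshapeSupportedOnNpu(const std::vector<FakeTensor> &inputs, const FakeReshapeParam &param) {
  if (inputs.size() == 1) {
    return param.shape_dim_ != 0;  // shape must already live in the parameter
  }
  if (inputs.size() == 2) {
    return inputs.at(1).IsConst();  // shape tensor must be const to bake it into the NPU graph
  }
  return false;  // any other input count is rejected
}

int main() {
  FakeReshapeParam no_folded_shape{0};
  std::vector<FakeTensor> const_shape_inputs = {{false}, {true}};
  std::vector<FakeTensor> dynamic_shape_inputs = {{false}, {false}};
  std::cout << std::boolalpha
            << ReshapeSupportedOnNpu(const_shape_inputs, no_folded_shape) << " "          // true
            << ReshapeSupportedOnNpu(dynamic_shape_inputs, no_folded_shape) << std::endl; // false
  return 0;
}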



mindspore/lite/src/runtime/kernel/npu/reshape_npu.h (+1, -1)

@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.


mindspore/lite/test/models_npu.cfg (+1, -0)

@@ -76,3 +76,4 @@ ml_video_edit_img_segment_adaptise_pb2tflite.tflite 0.5 2
 hdc_age_medium 504
 gts_detect_5k_tf115.tflite 319
 ml_video_edit_art_transfer.onnx 3 3
+ml_video_edit_enhance_update.onnx 0.5

mindspore/lite/test/models_onnx.cfg (+1, -0)

@@ -56,3 +56,4 @@ hdc_ocr_detect.onnx
 ml_edu_kit_hand_detection.onnx
 ml_edu_kit_hand_key_position.onnx
 ml_video_edit_art_generate.onnx
+ml_video_edit_enhance_update.onnx
