
!1637 Set storage shape to single_op executor.

From: @zhao_zhixuan
Reviewed-by: @xchu42, @ji_chen
Signed-off-by: @ji_chen
tags/v1.3.0
mindspore-ci-bot (Gitee), 3 years ago
parent commit 0fc60426e5
3 changed files with 33 additions and 1 deletion:

1. ge/hybrid/executor/hybrid_model_executor.cc (+1, -1)
2. ge/single_op/single_op.cc (+25, -0)
3. tests/ut/ge/single_op/single_op_unittest.cc (+7, -0)

ge/hybrid/executor/hybrid_model_executor.cc (+1, -1)

@@ -50,7 +50,7 @@ Status HybridModelExecutor::Execute(HybridModelExecutor::ExecuteArgs &args) {
   auto root_graph_item = model_->GetRootGraphItem();
   GE_CHECK_NOTNULL(root_graph_item);
 
-  if (root_graph_item->IsDynamic()) {
+  if (root_graph_item->IsDynamic() && !model_->IsSingleOp()) {
     GE_CHK_STATUS_RET(CheckInputShapeByShapeRange(root_graph_item, args),
                       "[%s] check input node shape by shape range failed.",
                       root_graph_item->GetName().c_str());
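For context on what the guarded call does, below is a stand-alone sketch of the kind of validation a shape-range check typically performs: every runtime dimension must fall inside the [min, max] range declared for a dynamic input. This is an illustrative sketch only, not the actual CheckInputShapeByShapeRange implementation, and every name in it is hypothetical. With the change above, this validation is skipped when the hybrid model is executing a single op.

// Illustrative only: a generic shape-range check, not GE's
// CheckInputShapeByShapeRange. All names below are hypothetical.
#include <cstdint>
#include <utility>
#include <vector>

// One {min, max} pair per dimension; max == -1 means unbounded.
using ShapeRange = std::vector<std::pair<int64_t, int64_t>>;

bool ShapeWithinRange(const std::vector<int64_t> &dims, const ShapeRange &range) {
  if (dims.size() != range.size()) {
    return false;  // rank mismatch against the declared range
  }
  for (size_t i = 0; i < dims.size(); ++i) {
    if (dims[i] < range[i].first) {
      return false;  // below declared minimum
    }
    if (range[i].second != -1 && dims[i] > range[i].second) {
      return false;  // above declared maximum
    }
  }
  return true;
}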


ge/single_op/single_op.cc (+25, -0)

@@ -113,6 +113,30 @@ Status UpdateInputsBufferAddr(StreamResource *stream_resource, rtStream_t stream
   return SUCCESS;
 }
 
+Status ModifyTensorDesc(GeTensorDesc &tensor) {
+  int64_t storage_format_val = static_cast<Format>(FORMAT_RESERVED);
+  (void)AttrUtils::GetInt(tensor, ge::ATTR_NAME_STORAGE_FORMAT, storage_format_val);
+  auto storage_format = static_cast<Format>(storage_format_val);
+  auto format = tensor.GetFormat();
+  if (storage_format != FORMAT_RESERVED && storage_format != format) {
+    std::vector<int64_t> storage_shape;
+    if (!AttrUtils::GetListInt(tensor, ge::ATTR_NAME_STORAGE_SHAPE, storage_shape)) {
+      GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Get][storage_shape]failed while storage_format was set.");
+      REPORT_INNER_ERROR("E19999", "Get storage_shape failed while storage_format was set.");
+      return ACL_ERROR_GE_INTERNAL_ERROR;
+    }
+
+    GELOGD("Storage format set. update shape to [%s], and original shape to [%s]",
+           GeShape(storage_shape).ToString().c_str(), tensor.GetShape().ToString().c_str());
+    tensor.SetOriginShape(tensor.GetShape());
+    tensor.SetOriginFormat(format);
+    tensor.SetShape(GeShape(storage_shape));
+    tensor.SetFormat(storage_format);
+  }
+
+  return SUCCESS;
+}
+
 Status InitHybridModelArgs(const std::vector<DataBuffer> &input_buffers,
                            const std::vector<DataBuffer> &output_buffers,
                            const std::vector<GeTensorDesc> &inputs_desc,
@@ -126,6 +150,7 @@ Status InitHybridModelArgs(const std::vector<DataBuffer> &input_buffers,
   for (auto &tensor_desc : inputs_desc) {
     auto desc = MakeShared<GeTensorDesc>(tensor_desc);
     GE_CHECK_NOTNULL(desc);
+    GE_CHK_STATUS_RET_NOLOG(ModifyTensorDesc(*desc));
     args.input_desc.emplace_back(desc);
   }
   return SUCCESS;
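A minimal usage sketch of the new path, using the GeTensorDesc/AttrUtils calls that already appear in this diff; the specific formats, shapes, and the function name below are illustrative assumptions, and the expected-value comments restate what ModifyTensorDesc above does.

// Sketch under the GE graph APIs used in this diff (headers assumed
// available in a GE build); FRACTAL_NZ and the shapes are only examples.
#include "graph/ge_tensor.h"
#include "graph/utils/attr_utils.h"
#include "graph/debug/ge_attr_define.h"

void BuildStorageAnnotatedDesc() {
  using namespace ge;
  GeTensorDesc desc(GeShape({16, 16}), FORMAT_ND, DT_FLOAT16);
  // Attach the storage attributes consumed by ModifyTensorDesc.
  (void)AttrUtils::SetInt(desc, ATTR_NAME_STORAGE_FORMAT,
                          static_cast<int64_t>(FORMAT_FRACTAL_NZ));
  (void)AttrUtils::SetListInt(desc, ATTR_NAME_STORAGE_SHAPE,
                              std::vector<int64_t>({1, 1, 16, 16}));

  // InitHybridModelArgs now runs ModifyTensorDesc on each input desc, so
  // afterwards the desc reports the storage layout while the original
  // layout is preserved in the origin fields:
  //   GetFormat()       -> FORMAT_FRACTAL_NZ
  //   GetShape()        -> [1, 1, 16, 16]
  //   GetOriginFormat() -> FORMAT_ND
  //   GetOriginShape()  -> [16, 16]
}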


tests/ut/ge/single_op/single_op_unittest.cc (+7, -0)

@@ -159,6 +159,13 @@ TEST_F(UtestSingleOp, test_singleop_execute_async2) {
   single_op.hybrid_model_executor_.reset(new (std::nothrow)hybrid::HybridModelExecutor(single_op.hybrid_model_.get(), 0, stream));
   EXPECT_EQ(single_op.running_param_->mem_base, nullptr);
   EXPECT_EQ(single_op.tasks_.size(), 0);
+
+  GeTensorDesc tensor;
+  int64_t storage_format_val = static_cast<Format>(FORMAT_NCHW);
+  AttrUtils::SetInt(tensor, "storage_format", storage_format_val);
+  std::vector<int64_t> storage_shape{1, 1, 1, 1};
+  AttrUtils::SetListInt(tensor, "storage_shape", storage_shape);
+  single_op.inputs_desc_.emplace_back(tensor);
   EXPECT_EQ(single_op.ExecuteAsync(input_buffers, output_buffers), PARAM_INVALID);
 }


