diff --git a/ge/hybrid/executor/hybrid_model_executor.cc b/ge/hybrid/executor/hybrid_model_executor.cc
index f8635a97..d4d97840 100755
--- a/ge/hybrid/executor/hybrid_model_executor.cc
+++ b/ge/hybrid/executor/hybrid_model_executor.cc
@@ -50,7 +50,7 @@ Status HybridModelExecutor::Execute(HybridModelExecutor::ExecuteArgs &args) {
   GELOGD("Start to execute model.");
   auto root_graph_item = model_->GetRootGraphItem();
   GE_CHECK_NOTNULL(root_graph_item);
-  if (root_graph_item->IsDynamic()) {
+  if (root_graph_item->IsDynamic() && !model_->IsSingleOp()) {
     GE_CHK_STATUS_RET(CheckInputShapeByShapeRange(root_graph_item, args),
                       "[%s] check input node shape by shape range failed.",
                       root_graph_item->GetName().c_str());
diff --git a/ge/single_op/single_op.cc b/ge/single_op/single_op.cc
index 36ca1850..d09e8398 100755
--- a/ge/single_op/single_op.cc
+++ b/ge/single_op/single_op.cc
@@ -113,6 +113,30 @@ Status UpdateInputsBufferAddr(StreamResource *stream_resource, rtStream_t stream
   return SUCCESS;
 }
 
+Status ModifyTensorDesc(GeTensorDesc &tensor) {
+  int64_t storage_format_val = static_cast<int64_t>(FORMAT_RESERVED);
+  (void)AttrUtils::GetInt(tensor, ge::ATTR_NAME_STORAGE_FORMAT, storage_format_val);
+  auto storage_format = static_cast<Format>(storage_format_val);
+  auto format = tensor.GetFormat();
+  if (storage_format != FORMAT_RESERVED && storage_format != format) {
+    std::vector<int64_t> storage_shape;
+    if (!AttrUtils::GetListInt(tensor, ge::ATTR_NAME_STORAGE_SHAPE, storage_shape)) {
+      GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Get][storage_shape]failed while storage_format was set.");
+      REPORT_INNER_ERROR("E19999", "Get storage_shape failed while storage_format was set.");
+      return ACL_ERROR_GE_INTERNAL_ERROR;
+    }
+
+    GELOGD("Storage format set. update shape to [%s], and original shape to [%s]",
+           GeShape(storage_shape).ToString().c_str(), tensor.GetShape().ToString().c_str());
+    tensor.SetOriginShape(tensor.GetShape());
+    tensor.SetOriginFormat(format);
+    tensor.SetShape(GeShape(storage_shape));
+    tensor.SetFormat(storage_format);
+  }
+
+  return SUCCESS;
+}
+
 Status InitHybridModelArgs(const std::vector<DataBuffer> &input_buffers,
                            const std::vector<DataBuffer> &output_buffers,
                            const std::vector<GeTensorDesc> &inputs_desc,
@@ -126,6 +150,7 @@ Status InitHybridModelArgs(const std::vector<DataBuffer> &input_buffers,
   for (auto &tensor_desc : inputs_desc) {
     auto desc = MakeShared<GeTensorDesc>(tensor_desc);
     GE_CHECK_NOTNULL(desc);
+    GE_CHK_STATUS_RET_NOLOG(ModifyTensorDesc(*desc));
     args.input_desc.emplace_back(desc);
   }
   return SUCCESS;
diff --git a/tests/ut/ge/single_op/single_op_unittest.cc b/tests/ut/ge/single_op/single_op_unittest.cc
index 3519811b..db3de7ec 100644
--- a/tests/ut/ge/single_op/single_op_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_unittest.cc
@@ -159,6 +159,13 @@ TEST_F(UtestSingleOp, test_singleop_execute_async2) {
   single_op.hybrid_model_executor_.reset(new (std::nothrow)hybrid::HybridModelExecutor(single_op.hybrid_model_.get(), 0, stream));
   EXPECT_EQ(single_op.running_param_->mem_base, nullptr);
   EXPECT_EQ(single_op.tasks_.size(), 0);
+
+  GeTensorDesc tensor;
+  int64_t storage_format_val = static_cast<int64_t>(FORMAT_NCHW);
+  AttrUtils::SetInt(tensor, "storage_format", storage_format_val);
+  std::vector<int64_t> storage_shape{1, 1, 1, 1};
+  AttrUtils::SetListInt(tensor, "storage_shape", storage_shape);
+  single_op.inputs_desc_.emplace_back(tensor);
   EXPECT_EQ(single_op.ExecuteAsync(input_buffers, output_buffers), PARAM_INVALID);
 }
 