
!1906 set size for dynamic input

Merge pull request !1906 from wangzhengjun/set_size
tags/v1.5.1
i-robot (Gitee) · 3 years ago
commit 3b916597b2
2 changed files with 33 additions and 3 deletions

  1. ge/hybrid/executor/hybrid_model_async_executor.cc (+5, -3)
  2. tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc (+28, -0)

ge/hybrid/executor/hybrid_model_async_executor.cc (+5, -3)

@@ -295,13 +295,15 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
         }
       }
       tensor_desc->SetShape(shape);
-      args.input_desc[input_index] = tensor_desc;
-      GELOGD("Update shape of input[%zu] to [%s]", input_index, tensor_desc->MutableShape().ToString().c_str());
+      GELOGD("Update shape[%s] of input[%zu] to [%s]",
+             shape.ToString().c_str(), input_index, tensor_desc->MutableShape().ToString().c_str());
       GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorMemorySizeInBytes(*tensor_desc, tensor_size),
                               "[Invoke][GetTensorMemorySizeInBytes]Failed to calc tensor size,"
                               "index = %zu, shape = [%s], model_id = %u.",
                               input_index, tensor_desc->GetShape().ToString().c_str(), model_id_);
-      GELOGD("Input tensor[%zu] size = %zu", input_index, tensor_size);
+      GELOGD("Input tensor[%zu] size = %ld", input_index, tensor_size);
+      TensorUtils::SetSize(*tensor_desc, tensor_size);
+      args.input_desc[input_index] = tensor_desc;
     }

     GE_CHECK_GE(tensor_size, 0);
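
A side note on the log changes in this hunk: tensor_size is an int64_t (it is the out-parameter of TensorUtils::GetTensorMemorySizeInBytes, and the new unit test below declares it as int64_t), so the old %zu specifier, which expects a size_t, did not match the argument; %ld matches int64_t on LP64 platforms. A minimal stand-alone sketch of the distinction, illustrative only and not part of this change:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  size_t input_index = 0;      // indices stay size_t and keep %zu
  int64_t tensor_size = 3104;  // byte sizes from GetTensorMemorySizeInBytes are int64_t

  // %ld matches int64_t on LP64 platforms, as used in the hunk above;
  // PRId64 from <cinttypes> is the fully portable spelling.
  std::printf("Input tensor[%zu] size = %ld\n", input_index, static_cast<long>(tensor_size));
  std::printf("Input tensor[%zu] size = %" PRId64 "\n", input_index, tensor_size);
  return 0;
}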


tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc (+28, -0)

@@ -103,4 +103,32 @@ TEST_F(UtestHybridModelAsyncExecutor, Test_execute) {
   context.callback_manager->callback_queue_.Push(eof_entry);
   ASSERT_EQ(executor.Execute(args), SUCCESS);
 }
+
+TEST_F(UtestHybridModelAsyncExecutor, test_PrepareInputs) {
+  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
+  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
+  ge_root_model->SetModelName("test_name");
+  GeModelPtr ge_sub_model = make_shared<GeModel>();
+  HybridModel hybrid_model(ge_root_model);
+  HybridModelAsyncExecutor executor(&hybrid_model);
+  GeTensorDescPtr tensor_desc = make_shared<GeTensorDesc>(GeShape({-1, 16, 16, 3}));
+  tensor_desc->SetShapeRange({{1, 256}, {16, 16}, {16, 16}, {3, 3}});
+  executor.input_tensor_desc_.insert({0, tensor_desc});
+  executor.device_id_ = 0;
+  executor.input_sizes_.insert({0, -1});
+  executor.is_input_dynamic_.push_back(true);
+
+  unique_ptr<uint8_t[]> data_buf(new (std::nothrow)uint8_t[3072]);
+  InputData input_data;
+  input_data.blobs.push_back(DataBuffer(data_buf.get(), 3072, false));
+  input_data.shapes.push_back({1, 16, 16, 3});
+  HybridModelExecutor::ExecuteArgs args;
+
+  auto ret = executor.PrepareInputs(input_data, args);
+  ASSERT_EQ(ret, SUCCESS);
+  ASSERT_EQ(args.input_desc[0]->GetShape().ToString(), GeShape({1, 16, 16, 3}).ToString());
+  int64_t tensor_size = 0;
+  TensorUtils::GetSize(*(args.input_desc[0]), tensor_size);
+  ASSERT_EQ(tensor_size, 3104);
+}
 } // namespace ge
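
Where the expected 3104 in test_PrepareInputs comes from: the resolved shape is {1, 16, 16, 3}, i.e. 768 elements, and the GeTensorDesc in the test is built with the default data type (FLOAT, 4 bytes per element), giving 1 * 16 * 16 * 3 * 4 = 3072 bytes of raw data, which is also the size of the buffer the test feeds in. The remaining 32 bytes are presumably alignment padding added by GetTensorMemorySizeInBytes on top of the raw size; that padding is inferred from the asserted value, not visible in this diff. A small stand-alone sketch of the arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  // Shape that PrepareInputs resolves for input 0 in the test above.
  const int64_t dims[] = {1, 16, 16, 3};
  int64_t elements = 1;
  for (int64_t d : dims) {
    elements *= d;  // 768 elements
  }
  const int64_t raw_bytes = elements * static_cast<int64_t>(sizeof(float));  // 3072, assuming DT_FLOAT
  const int64_t assumed_padding = 32;  // inferred from 3104 - 3072, not from the GE sources
  assert(raw_bytes + assumed_padding == 3104);
  return 0;
}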
