Browse Source

modified: ge/hybrid/executor/hybrid_model_async_executor.cc

modified: ge/hybrid/executor/subgraph_executor.cc
modified: ge/hybrid/node_executor/aicore/aicore_op_task.cc
tags/v1.3.0
zhaoxinxin 4 years ago
parent
commit
a1fce7c8fa
3 changed files with 4 additions and 4 deletions
  1. +2
    -0
      ge/hybrid/executor/hybrid_model_async_executor.cc
  2. +2
    -2
      ge/hybrid/executor/subgraph_executor.cc
  3. +0
    -2
      ge/hybrid/node_executor/aicore/aicore_op_task.cc

+ 2
- 0
ge/hybrid/executor/hybrid_model_async_executor.cc View File

@@ -67,6 +67,7 @@ Status HybridModelAsyncExecutor::Start(const std::shared_ptr<ModelListener> &lis
future_ = std::async(std::launch::async, [&]() -> Status {
GetThreadLocalContext() = *executor_->GetContext()->ge_context;
GetContext().SetSessionId(executor_->GetContext()->session_id);
GetContext().SetContextId(executor_->GetContext()->context_id);
return RunInternal();
});

@@ -166,6 +167,7 @@ Status HybridModelAsyncExecutor::RunInternal() {
} else {
GELOGI("HybridModel will execute in singleline mode");
ge::GetContext().SetSessionId(executor_->GetContext()->session_id);
ge::GetContext().SetContextId(executor_->GetContext()->context_id);
ret = executor_->Execute(args);
}
ret = HandleResult(ret, current_data.index, args, data_wrapper->GetOutput());


+ 2
- 2
ge/hybrid/executor/subgraph_executor.cc View File

@@ -227,6 +227,7 @@ Status SubgraphExecutor::PrepareNodes(int group) {
if (node_item.is_dynamic) {
auto prepare_future = pre_run_pool_.commit([this, p_node_state]() -> Status {
GetContext().SetSessionId(context_->session_id);
GetContext().SetContextId(context_->context_id);
GE_CHK_STATUS_RET_NOLOG(InferShape(shape_inference_engine_.get(), *p_node_state));
return PrepareForExecution(context_, *p_node_state);
});
@@ -273,10 +274,8 @@ Status SubgraphExecutor::PrepareNodes(int group) {
}

Status SubgraphExecutor::InferShape(ShapeInferenceEngine *shape_inference_engine, NodeState &node_state) const {
GetContext().SetSessionId(context_->context_id);
HYBRID_CHK_STATUS_RET(shape_inference_engine->InferShape(node_state),
"[%s] Failed to InferShape.", node_state.GetName().c_str());
GetContext().SetSessionId(context_->session_id);
HYBRID_CHK_STATUS_RET(shape_inference_engine->PropagateOutputShapes(node_state),
"[%s] Failed to PropagateOutputShapes.", node_state.GetName().c_str());
return SUCCESS;
@@ -345,6 +344,7 @@ Status SubgraphExecutor::ScheduleTasks(int group) {
GELOGD("[%s] Start to schedule prepare workers.", graph_item_->GetName().c_str());
auto prepare_future = std::async(std::launch::async, [&]() -> Status {
GetContext().SetSessionId(context_->session_id);
GetContext().SetContextId(context_->context_id);
auto ret = PrepareNodes(group);
ready_queue_.Push(nullptr);
return ret;


+ 0
- 2
ge/hybrid/node_executor/aicore/aicore_op_task.cc View File

@@ -307,11 +307,9 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) {

auto execution_context = context.GetExecutionContext();

GetContext().SetSessionId(execution_context->context_id);
RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] Start");
GE_CHK_STATUS_RET(CalcTilingInfo(node, tiling_info));
RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] End");
GetContext().SetSessionId(execution_context->session_id);

// update op args by tiling info
block_dim_ = static_cast<uint32_t>(tiling_info.block_dim);


Loading…
Cancel
Save