From a1fce7c8fa2709e245670ce249d40e02edb48c8a Mon Sep 17 00:00:00 2001 From: zhaoxinxin Date: Tue, 16 Mar 2021 14:27:50 +0800 Subject: [PATCH] modified: ge/hybrid/executor/hybrid_model_async_executor.cc modified: ge/hybrid/executor/subgraph_executor.cc modified: ge/hybrid/node_executor/aicore/aicore_op_task.cc --- ge/hybrid/executor/hybrid_model_async_executor.cc | 2 ++ ge/hybrid/executor/subgraph_executor.cc | 4 ++-- ge/hybrid/node_executor/aicore/aicore_op_task.cc | 2 -- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc index 9f37e7d5..0194a492 100644 --- a/ge/hybrid/executor/hybrid_model_async_executor.cc +++ b/ge/hybrid/executor/hybrid_model_async_executor.cc @@ -67,6 +67,7 @@ Status HybridModelAsyncExecutor::Start(const std::shared_ptr &lis future_ = std::async(std::launch::async, [&]() -> Status { GetThreadLocalContext() = *executor_->GetContext()->ge_context; GetContext().SetSessionId(executor_->GetContext()->session_id); + GetContext().SetContextId(executor_->GetContext()->context_id); return RunInternal(); }); @@ -166,6 +167,7 @@ Status HybridModelAsyncExecutor::RunInternal() { } else { GELOGI("HybridModel will execute in singleline mode"); ge::GetContext().SetSessionId(executor_->GetContext()->session_id); + ge::GetContext().SetContextId(executor_->GetContext()->context_id); ret = executor_->Execute(args); } ret = HandleResult(ret, current_data.index, args, data_wrapper->GetOutput()); diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc index 45db9936..57e4052d 100644 --- a/ge/hybrid/executor/subgraph_executor.cc +++ b/ge/hybrid/executor/subgraph_executor.cc @@ -227,6 +227,7 @@ Status SubgraphExecutor::PrepareNodes(int group) { if (node_item.is_dynamic) { auto prepare_future = pre_run_pool_.commit([this, p_node_state]() -> Status { GetContext().SetSessionId(context_->session_id); + 
GetContext().SetContextId(context_->context_id); GE_CHK_STATUS_RET_NOLOG(InferShape(shape_inference_engine_.get(), *p_node_state)); return PrepareForExecution(context_, *p_node_state); }); @@ -273,10 +274,8 @@ Status SubgraphExecutor::PrepareNodes(int group) { } Status SubgraphExecutor::InferShape(ShapeInferenceEngine *shape_inference_engine, NodeState &node_state) const { - GetContext().SetSessionId(context_->context_id); HYBRID_CHK_STATUS_RET(shape_inference_engine->InferShape(node_state), "[%s] Failed to InferShape.", node_state.GetName().c_str()); - GetContext().SetSessionId(context_->session_id); HYBRID_CHK_STATUS_RET(shape_inference_engine->PropagateOutputShapes(node_state), "[%s] Failed to PropagateOutputShapes.", node_state.GetName().c_str()); return SUCCESS; @@ -345,6 +344,7 @@ Status SubgraphExecutor::ScheduleTasks(int group) { GELOGD("[%s] Start to schedule prepare workers.", graph_item_->GetName().c_str()); auto prepare_future = std::async(std::launch::async, [&]() -> Status { GetContext().SetSessionId(context_->session_id); + GetContext().SetContextId(context_->context_id); auto ret = PrepareNodes(group); ready_queue_.Push(nullptr); return ret; diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.cc b/ge/hybrid/node_executor/aicore/aicore_op_task.cc index 07c2ddb5..6af2fd4a 100644 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.cc +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.cc @@ -307,11 +307,9 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { auto execution_context = context.GetExecutionContext(); - GetContext().SetSessionId(execution_context->context_id); RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] Start"); GE_CHK_STATUS_RET(CalcTilingInfo(node, tiling_info)); RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] End"); - GetContext().SetSessionId(execution_context->session_id); // update op args by tiling info block_dim_ = 
static_cast<uint32_t>(tiling_info.block_dim);