From cb347ae8cc0a44fd5d5e648196c92d3df9929294 Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Thu, 29 Apr 2021 20:00:28 +0800
Subject: [PATCH] add void for guard

---
 ge/hybrid/executor/node_state.cc                    | 3 +++
 ge/hybrid/executor/subgraph_context.cc              | 1 +
 ge/hybrid/executor/worker/execution_engine.cc       | 1 +
 ge/hybrid/executor/worker/shape_inference_engine.cc | 2 ++
 ge/hybrid/node_executor/rts/rts_node_task.cc        | 4 ++--
 ge/hybrid/node_executor/task_context.cc             | 2 +-
 6 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/ge/hybrid/executor/node_state.cc b/ge/hybrid/executor/node_state.cc
index 1326e863..aaa7801f 100644
--- a/ge/hybrid/executor/node_state.cc
+++ b/ge/hybrid/executor/node_state.cc
@@ -155,6 +155,7 @@ Status ShapeInferenceState::AwaitShapesReady(const GraphExecutionContext &contex
       dst_tensor_desc->SetOriginShape(tensor_desc.GetOriginShape());
       (void)TensorUtils::SetSize(*dst_tensor_desc, tensor_size);
     }
+    (void)guard;
   }
 
   for (auto &p : shape_futures) {
@@ -180,6 +181,7 @@ Status ShapeInferenceState::AwaitShapesReady(const GraphExecutionContext &contex
     input_desc->SetShape(src_tensor_desc->GetShape());
     input_desc->SetOriginShape(src_tensor_desc->GetOriginShape());
     (void) TensorUtils::SetSize(*input_desc, tensor_size);
+    (void)guard;
   }
 
   return SUCCESS;
@@ -285,6 +287,7 @@ void NodeState::ResetContext(int group) {
   shape_inference_state_.InitShapeState();
   subgraph_context_->ResetContext(node_item_->node);
   GELOGD("Node[%s] in while loop, current loop: %lu, merge index: %d", GetName().c_str(), loop_count_, merge_index_);
+  (void)guard;
 }
 
 void NodeState::ResetSchedule() {
diff --git a/ge/hybrid/executor/subgraph_context.cc b/ge/hybrid/executor/subgraph_context.cc
index 636bef0c..9a9a97c2 100644
--- a/ge/hybrid/executor/subgraph_context.cc
+++ b/ge/hybrid/executor/subgraph_context.cc
@@ -47,6 +47,7 @@ NodeStatePtr SubgraphContext::GetOrCreateNodeState(const NodeItem *node_item) {
   if (node_state == nullptr) {
     const auto &guard = node_item->MutexGuard("GetOrCreateNodeState");
     node_state.reset(new(std::nothrow)NodeState(*node_item, this));
+    (void)guard;
   }
 
   return node_state;
diff --git a/ge/hybrid/executor/worker/execution_engine.cc b/ge/hybrid/executor/worker/execution_engine.cc
index 4d77d0f0..678e5c63 100755
--- a/ge/hybrid/executor/worker/execution_engine.cc
+++ b/ge/hybrid/executor/worker/execution_engine.cc
@@ -317,6 +317,7 @@ Status NodeDoneCallback::OnNodeDone() {
     const auto &guard = node_item.MutexGuard("OnNodeDone");
     GE_CHK_STATUS_RET_NOLOG(ShapeInferenceEngine::CalcOutputTensorSizes(node_item));
     GE_CHK_STATUS_RET_NOLOG(context_->GetNodeState()->GetShapeInferenceState().UpdateOutputDesc());
+    (void)guard;
   }
   // PropagateOutputs for type == DEPEND_COMPUTE
   if (node_item.shape_inference_type == DEPEND_COMPUTE) {
diff --git a/ge/hybrid/executor/worker/shape_inference_engine.cc b/ge/hybrid/executor/worker/shape_inference_engine.cc
index 36ff9a07..a2efbb25 100755
--- a/ge/hybrid/executor/worker/shape_inference_engine.cc
+++ b/ge/hybrid/executor/worker/shape_inference_engine.cc
@@ -51,6 +51,7 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) {
     GE_CHK_STATUS_RET_NOLOG(CalcOutputTensorSizes(node_item));
     return SUCCESS;
   }
+  (void)guard;
 
   // Skip shape inference for node of type DEPEND_COMPUTE
   if (node_item.shape_inference_type == DEPEND_COMPUTE) {
@@ -150,6 +151,7 @@ Status ShapeInferenceEngine::PropagateOutputShapes(NodeState &node_state) {
       }
     }
   }
+  (void)guard;
   RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[PropagateOutputShapes] End");
   GELOGD("[%s] Propagating output shapes finished successfully.", node_item.NodeName().c_str());
   return SUCCESS;
diff --git a/ge/hybrid/node_executor/rts/rts_node_task.cc b/ge/hybrid/node_executor/rts/rts_node_task.cc
index 57f8003b..94566fc6 100644
--- a/ge/hybrid/node_executor/rts/rts_node_task.cc
+++ b/ge/hybrid/node_executor/rts/rts_node_task.cc
@@ -149,7 +149,7 @@ Status StreamMergeNodeTask::ExecuteAsync(TaskContext &task_context, std::functio
 
   const auto in_x = task_context.MutableInput(index);  // x
   GE_CHECK_NOTNULL(in_x);
-  task_context.SetOutput(MERGE_DATA_OUTPUT, *in_x);  // y
+  GE_CHK_STATUS_RET_NOLOG(task_context.SetOutput(MERGE_DATA_OUTPUT, *in_x));  // y
 
   const auto out_y = task_context.MutableOutput(MERGE_INDEX_OUTPUT);  // value_index
   GE_CHECK_NOTNULL(out_y);
@@ -194,7 +194,7 @@ Status PassThroughNodeTask::ExecuteAsync(TaskContext &task_context, std::functio
   GELOGD("[%s] Start to execute.", task_context.GetNodeName());
   const auto in_x = task_context.GetInput(0);  // x
   GE_CHECK_NOTNULL(in_x);
-  task_context.SetOutput(0, *in_x);  // y
+  GE_CHK_STATUS_RET_NOLOG(task_context.SetOutput(0, *in_x));  // y
 
   if (done_callback) {
     GE_CHK_STATUS_RET(task_context.RegisterCallback(done_callback));
diff --git a/ge/hybrid/node_executor/task_context.cc b/ge/hybrid/node_executor/task_context.cc
index a09b3282..0468930a 100644
--- a/ge/hybrid/node_executor/task_context.cc
+++ b/ge/hybrid/node_executor/task_context.cc
@@ -462,7 +462,7 @@ Status TaskContext::PropagateOutputs() {
       }
     }
   }
-
+  (void)guard;
   return SUCCESS;
 }
 