@@ -155,6 +155,7 @@ Status ShapeInferenceState::AwaitShapesReady(const GraphExecutionContext &contex
       dst_tensor_desc->SetOriginShape(tensor_desc.GetOriginShape());
       (void)TensorUtils::SetSize(*dst_tensor_desc, tensor_size);
     }
+    (void)guard;
   }
   for (auto &p : shape_futures) {
@@ -180,6 +181,7 @@ Status ShapeInferenceState::AwaitShapesReady(const GraphExecutionContext &contex
     input_desc->SetShape(src_tensor_desc->GetShape());
     input_desc->SetOriginShape(src_tensor_desc->GetOriginShape());
     (void) TensorUtils::SetSize(*input_desc, tensor_size);
+    (void)guard;
   }
   return SUCCESS;
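Each of the `(void)guard;` additions above (and in the hunks that follow) touches the RAII mutex guard at the very end of its critical section. The guard returned by `NodeItem::MutexGuard` is only "used" through its constructor and destructor, so some compilers and static checkers flag the named local as unused; the explicit `(void)` cast silences that diagnostic without changing when the lock is released. A minimal sketch of the idiom, assuming `MutexGuard` hands back an RAII lock object (the real return type in GraphEngine may differ):

```cpp
#include <mutex>

// Hypothetical stand-in for NodeItem: MutexGuard() returns an RAII lock that
// stays held for as long as the named local it is bound to remains in scope.
class Item {
 public:
  std::unique_lock<std::mutex> MutexGuard(const char *stage) const {
    (void)stage;  // the real implementation uses this for tracing
    return std::unique_lock<std::mutex>(mu_);
  }

 private:
  mutable std::mutex mu_;
};

void UpdateSharedState(const Item &item) {
  const auto &guard = item.MutexGuard("UpdateSharedState");
  // ... mutate state shared with other executor threads ...
  (void)guard;  // mark the guard as used so -Wunused-variable / lint stays quiet;
                // the mutex is still released only when the scope ends
}
```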
@@ -285,6 +287,7 @@ void NodeState::ResetContext(int group) {
   shape_inference_state_.InitShapeState();
   subgraph_context_->ResetContext(node_item_->node);
   GELOGD("Node[%s] in while loop, current loop: %lu, merge index: %d", GetName().c_str(), loop_count_, merge_index_);
+  (void)guard;
 }
 void NodeState::ResetSchedule() {
@@ -47,6 +47,7 @@ NodeStatePtr SubgraphContext::GetOrCreateNodeState(const NodeItem *node_item) {
   if (node_state == nullptr) {
     const auto &guard = node_item->MutexGuard("GetOrCreateNodeState");
     node_state.reset(new(std::nothrow)NodeState(*node_item, this));
+    (void)guard;
   }
   return node_state;
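In `GetOrCreateNodeState` the trailing `(void)guard;` also documents intent: the lock is meant to stay held until the lazily created `NodeState` has been stored, so the otherwise-unreferenced local is clearly deliberate rather than dead code.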
@@ -317,6 +317,7 @@ Status NodeDoneCallback::OnNodeDone() {
     const auto &guard = node_item.MutexGuard("OnNodeDone");
     GE_CHK_STATUS_RET_NOLOG(ShapeInferenceEngine::CalcOutputTensorSizes(node_item));
     GE_CHK_STATUS_RET_NOLOG(context_->GetNodeState()->GetShapeInferenceState().UpdateOutputDesc());
+    (void)guard;
   }
   // PropagateOutputs for type == DEPEND_COMPUTE
   if (node_item.shape_inference_type == DEPEND_COMPUTE) {
@@ -51,6 +51,7 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) {
     GE_CHK_STATUS_RET_NOLOG(CalcOutputTensorSizes(node_item));
     return SUCCESS;
   }
+  (void)guard;
   // Skip shape inference for node of type DEPEND_COMPUTE
   if (node_item.shape_inference_type == DEPEND_COMPUTE) {
@@ -150,6 +151,7 @@ Status ShapeInferenceEngine::PropagateOutputShapes(NodeState &node_state) {
       }
     }
   }
+  (void)guard;
   RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[PropagateOutputShapes] End");
   GELOGD("[%s] Propagating output shapes finished successfully.", node_item.NodeName().c_str());
   return SUCCESS;
@@ -149,7 +149,7 @@ Status StreamMergeNodeTask::ExecuteAsync(TaskContext &task_context, std::functio
   const auto in_x = task_context.MutableInput(index); // x
   GE_CHECK_NOTNULL(in_x);
-  task_context.SetOutput(MERGE_DATA_OUTPUT, *in_x); // y
+  GE_CHK_STATUS_RET_NOLOG(task_context.SetOutput(MERGE_DATA_OUTPUT, *in_x)); // y
   const auto out_y = task_context.MutableOutput(MERGE_INDEX_OUTPUT); // value_index
   GE_CHECK_NOTNULL(out_y);
@@ -194,7 +194,7 @@ Status PassThroughNodeTask::ExecuteAsync(TaskContext &task_context, std::functio
   GELOGD("[%s] Start to execute.", task_context.GetNodeName());
   const auto in_x = task_context.GetInput(0); // x
   GE_CHECK_NOTNULL(in_x);
-  task_context.SetOutput(0, *in_x); // y
+  GE_CHK_STATUS_RET_NOLOG(task_context.SetOutput(0, *in_x)); // y
   if (done_callback) {
     GE_CHK_STATUS_RET(task_context.RegisterCallback(done_callback));
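The two control-op hunks above stop discarding the `Status` returned by `TaskContext::SetOutput` and instead fail fast via `GE_CHK_STATUS_RET_NOLOG`, which returns the error to the caller on failure. A minimal sketch of that check-and-return pattern, using a simplified macro and a hypothetical `SetOutput` signature in place of the real GraphEngine ones:

```cpp
#include <cstdint>

using Status = uint32_t;
constexpr Status SUCCESS = 0U;
constexpr Status FAILED = 1U;

// Simplified stand-in for GE_CHK_STATUS_RET_NOLOG: evaluate the expression and,
// if it did not succeed, propagate the error status to the caller (no logging).
#define CHK_STATUS_RET_NOLOG(expr)   \
  do {                               \
    const Status ret_ = (expr);      \
    if (ret_ != SUCCESS) {           \
      return ret_;                   \
    }                                \
  } while (false)

struct Tensor {};

struct TaskContext {
  // Hypothetical simplified signature; the real SetOutput also validates the index.
  Status SetOutput(int index, const Tensor &tensor) {
    (void)tensor;
    return (index >= 0) ? SUCCESS : FAILED;
  }
};

Status ExecuteAsync(TaskContext &task_context, const Tensor &in_x) {
  // Before: task_context.SetOutput(0, in_x);             // Status silently dropped
  CHK_STATUS_RET_NOLOG(task_context.SetOutput(0, in_x));  // now a failure aborts the task
  return SUCCESS;
}
```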
@@ -462,7 +462,7 @@ Status TaskContext::PropagateOutputs() {
       }
     }
   }
+  (void)guard;
   return SUCCESS;
 }
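The final hunk applies the same idiom at the end of `TaskContext::PropagateOutputs`: rather than narrowing the guard's scope, the change keeps the lock covering the whole propagation loop and simply marks the guard as intentionally held until the function returns.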