From a31e700c3ad9c3ef316b6d271994f838b7dfe808 Mon Sep 17 00:00:00 2001
From: chuxing
Date: Sat, 5 Dec 2020 10:17:34 +0800
Subject: [PATCH] fixing static check

---
 ge/hybrid/executor/hybrid_profiler.cc                     | 2 +-
 ge/hybrid/executor/subgraph_executor.cc                   | 1 +
 ge/hybrid/executor/worker/shape_inference_engine.cc       | 6 +++---
 ge/hybrid/model/node_item.h                               | 4 ++--
 .../node_executor/compiledsubgraph/known_node_executor.h  | 2 +-
 .../node_executor/ge_local/ge_local_node_executor.cc      | 8 ++++----
 6 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/ge/hybrid/executor/hybrid_profiler.cc b/ge/hybrid/executor/hybrid_profiler.cc
index 7228197f..336a633f 100644
--- a/ge/hybrid/executor/hybrid_profiler.cc
+++ b/ge/hybrid/executor/hybrid_profiler.cc
@@ -25,7 +25,7 @@ namespace ge {
 namespace hybrid {
 namespace {
 const int kMaxEvents = 10000;
-const int kEventDescMax = 256;
+const int kEventDescMax = 512;
 const int kMaxEventTypes = 8;
 const int kIndent = 8;
 }
diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc
index 76a6cc37..5a464f8e 100644
--- a/ge/hybrid/executor/subgraph_executor.cc
+++ b/ge/hybrid/executor/subgraph_executor.cc
@@ -93,6 +93,7 @@ Status SubgraphExecutor::InitInputsForUnknownShape(const std::vector<TensorValue
       GELOGD("[%s] Start to update input[%zu] for subgraph data node.", graph_item_->GetName().c_str(), i);
       GE_CHECK_LE(i + 1, input_desc.size());
       const auto &tensor_desc = input_desc[i];
+      GE_CHECK_NOTNULL(tensor_desc);
       auto node_state = subgraph_context_->GetOrCreateNodeState(input_node);
       GE_CHECK_NOTNULL(node_state);
       node_state->GetShapeInferenceState().UpdateInputShape(0, tensor_desc->GetOriginShape(), tensor_desc->GetShape());
diff --git a/ge/hybrid/executor/worker/shape_inference_engine.cc b/ge/hybrid/executor/worker/shape_inference_engine.cc
index bd429b21..d4019eda 100755
--- a/ge/hybrid/executor/worker/shape_inference_engine.cc
+++ b/ge/hybrid/executor/worker/shape_inference_engine.cc
@@ -164,7 +164,7 @@ Status ShapeInferenceEngine::InferShapeForSubgraph(const NodeItem &node_item, co
   for (auto &it : fused_subgraph.input_mapping) {
     auto parent_tensor_desc = node_item.MutableInputDesc(it.first);
     GE_CHECK_NOTNULL(parent_tensor_desc);
-    GELOGD("Start to update shape by input[%u]", it.first);
+    GELOGD("Start to update shape by input[%d]", it.first);
     GELOGD("Update shape to [%s]", parent_tensor_desc->GetShape().ToString().c_str());
     GELOGD("Update original shape to [%s]", parent_tensor_desc->GetOriginShape().ToString().c_str());
     for (auto &tensor_desc : it.second) {
@@ -183,12 +183,12 @@ Status ShapeInferenceEngine::InferShapeForSubgraph(const NodeItem &node_item, co
   }
 
   for (auto &it : fused_subgraph.output_mapping) {
-    uint32_t parent_output_idx = it.first;
+    int parent_output_idx = it.first;
     const auto &op_desc = it.second;
     GELOGD("Update parent output[%d] by [%s]", parent_output_idx, op_desc->GetName().c_str());
     auto input_desc = op_desc->MutableInputDesc(0);
     GE_CHECK_NOTNULL(input_desc);
-    auto parent_output_tensor_desc = node_item.op_desc->MutableOutputDesc(parent_output_idx);
+    auto parent_output_tensor_desc = node_item.MutableOutputDesc(parent_output_idx);
     GE_CHECK_NOTNULL(parent_output_tensor_desc);
     GELOGD("Update shape to [%s]", input_desc->GetShape().ToString().c_str());
     GELOGD("Update original shape to [%s]", input_desc->GetOriginShape().ToString().c_str());
diff --git a/ge/hybrid/model/node_item.h b/ge/hybrid/model/node_item.h
index 8fac4a73..8fbdc648 100644
--- a/ge/hybrid/model/node_item.h
+++ b/ge/hybrid/model/node_item.h
@@ -30,8 +30,8 @@ class NodeTask;
 class NodeExecutor;
 
 struct FusedSubgraph {
-  std::map<uint32_t, std::vector<GeTensorDescPtr>> input_mapping;
-  std::map<uint32_t, OpDescPtr> output_mapping;
+  std::map<int, std::vector<GeTensorDescPtr>> input_mapping;
+  std::map<int, OpDescPtr> output_mapping;
   std::vector<NodePtr> nodes;
   ComputeGraphPtr graph;
 };
diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h
index fb1966b4..2dde993b 100644
--- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h
+++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h
@@ -27,7 +27,7 @@ class HybridModel;
 
 class KnownNodeTask : public NodeTask {
  public:
-  KnownNodeTask(std::shared_ptr<DavinciModel> davinci_model)
+  explicit KnownNodeTask(std::shared_ptr<DavinciModel> davinci_model)
     : davinci_model_(davinci_model)
     {}
 
diff --git a/ge/hybrid/node_executor/ge_local/ge_local_node_executor.cc b/ge/hybrid/node_executor/ge_local/ge_local_node_executor.cc
index ee45964c..7a83641d 100755
--- a/ge/hybrid/node_executor/ge_local/ge_local_node_executor.cc
+++ b/ge/hybrid/node_executor/ge_local/ge_local_node_executor.cc
@@ -61,10 +61,10 @@ Status RefInputTask::Execute(TaskContext &context) {
 
 Status RefInputTask::RefOneByOne(TaskContext &context) {
   GELOGI("node %s type %s ref input one by one begin.", node_name_.c_str(), node_type_.c_str());
-  uint32_t input_num = context.NumInputs();
-  uint32_t output_num = context.NumOutputs();
+  int input_num = context.NumInputs();
+  int output_num = context.NumOutputs();
   if (output_num > input_num) {
-    GELOGE(INTERNAL_ERROR, "node %s type %s has %u outputs but only %u inputs, can't ref one by one.",
+    GELOGE(INTERNAL_ERROR, "node %s type %s has %d outputs but only %d inputs, can't ref one by one.",
            node_name_.c_str(), node_type_.c_str(), output_num, input_num);
     return INTERNAL_ERROR;
   }
@@ -72,7 +72,7 @@ Status RefInputTask::RefOneByOne(TaskContext &context) {
     auto input = context.GetInput(out_index);
     GE_CHECK_NOTNULL(input);
     GE_CHK_STATUS_RET(context.SetOutput(out_index, *input));
-    GELOGD("node %s type %s output[%u] ref input[%u] addr=%p.",
+    GELOGD("node %s type %s output[%d] ref input[%d] addr=%p.",
            node_name_.c_str(), node_type_.c_str(), out_index, out_index, input->GetData());
   }
   GELOGI("node %s type %s ref input one by one end.", node_name_.c_str(), node_type_.c_str());