Browse Source

fixes coverity warning

tags/v1.5.1
李磊 3 years ago
parent
commit
fdab42b28d
5 changed files with 22 additions and 18 deletions
  1. +16
    -16
      ge/graph/load/model_manager/davinci_model.cc
  2. +1
    -1
      ge/hybrid/executor/worker/execution_engine.cc
  3. +2
    -0
      ge/hybrid/model/hybrid_model_builder.cc
  4. +2
    -1
      ge/hybrid/node_executor/hccl/hccl_node_executor.cc
  5. +1
    -0
      ge/ir_build/ge_ir_build.cc

+ 16
- 16
ge/graph/load/model_manager/davinci_model.cc View File

@@ -387,8 +387,8 @@ Status DavinciModel::InitWeightMem(void *dev_ptr, void *weight_ptr, size_t weigh


Status DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) { Status DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) {
if (is_feature_map_mem_has_inited_) { if (is_feature_map_mem_has_inited_) {
REPORT_INNER_ERROR("E19999", "Call InitFeatureMapMem more than once, model_id:%u, check invalid", model_id_);
GELOGE(PARAM_INVALID, "[Check][Param] call InitFeatureMapMem more than once, model_id:%u", model_id_);
REPORT_INNER_ERROR("E19999", "InitFeatureMapMem is called more than once, model_id:%u, check invalid", model_id_);
GELOGE(PARAM_INVALID, "[Check][Param] InitFeatureMapMem is called more than once, model_id:%u", model_id_);
return PARAM_INVALID; return PARAM_INVALID;
} }
is_feature_map_mem_has_inited_ = true; is_feature_map_mem_has_inited_ = true;
@@ -456,8 +456,7 @@ Status DavinciModel::InitVariableMem() {


void DavinciModel::InitRuntimeParams() { void DavinciModel::InitRuntimeParams() {
int64_t value = 0; int64_t value = 0;
bool ret;
ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_MEMORY_SIZE, value);
bool ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_MEMORY_SIZE, value);
runtime_param_.mem_size = ret ? (uint64_t)value : 0; runtime_param_.mem_size = ret ? (uint64_t)value : 0;
ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_WEIGHT_SIZE, value); ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_WEIGHT_SIZE, value);
runtime_param_.weight_size = ret ? (uint64_t)value : 0; runtime_param_.weight_size = ret ? (uint64_t)value : 0;
@@ -983,7 +982,7 @@ Status DavinciModel::InitDataOp(const ComputeGraphPtr &graph, const NodePtr &nod
// op_desc Checked by Init: Data, valid. // op_desc Checked by Init: Data, valid.
auto op_desc = node->GetOpDesc(); auto op_desc = node->GetOpDesc();
if (node->GetOwnerComputeGraph() != graph) { if (node->GetOwnerComputeGraph() != graph) {
GELOGI("Skip subgraph Data node: %s.", op_desc->GetName().c_str());
GELOGI("Skip Data node: %s in subgraph.", op_desc->GetName().c_str());
return SUCCESS; return SUCCESS;
} }


@@ -1195,7 +1194,7 @@ Status DavinciModel::InitRealSizeAndShapeInfo(const ComputeGraphPtr &compute_gra
GELOGD("No need to get size and shape of netoutput in subgraph."); GELOGD("No need to get size and shape of netoutput in subgraph.");
return SUCCESS; return SUCCESS;
} }
GELOGD("Start init real size and shape info of %s.", node->GetName().c_str());
GELOGD("Start to initialize real size and shape info of %s.", node->GetName().c_str());
GetAllGearsInfo(node); GetAllGearsInfo(node);
if (is_getnext_sink_dynamic_) { if (is_getnext_sink_dynamic_) {
GE_IF_BOOL_EXEC(GetGetDynamicDimsNodeInfo(node) != SUCCESS, GE_IF_BOOL_EXEC(GetGetDynamicDimsNodeInfo(node) != SUCCESS,
@@ -1238,7 +1237,7 @@ void DavinciModel::GetAllGearsInfo(const NodePtr &node) {
} }
if (!gear_info.empty()) { if (!gear_info.empty()) {
all_gears_info_.emplace_back(gear_info); all_gears_info_.emplace_back(gear_info);
GELOGD("Init all gears info from %s, gaer info is %s", node->GetName().c_str(),
GELOGD("Init all gears info from %s, gear info is %s", node->GetName().c_str(),
formats::JoinToString(gear_info).c_str()); formats::JoinToString(gear_info).c_str());
} }
} }
@@ -1318,7 +1317,7 @@ Status DavinciModel::GetGearAndRealOutSizeInfo(const ComputeGraphPtr &graph, con


Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_t input_index, Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_t input_index,
const NodePtr &case_node) { const NodePtr &case_node) {
GELOGD("Start get output size of %s, which is %zu input to netoutput", case_node->GetName().c_str(), input_index);
GELOGD("Start to get output size of %s, which is %zu input to netoutput", case_node->GetName().c_str(), input_index);
const auto &func_desc = case_node->GetOpDesc(); const auto &func_desc = case_node->GetOpDesc();
GE_CHECK_NOTNULL(func_desc); GE_CHECK_NOTNULL(func_desc);
std::map<vector<int32_t>, int64_t> gear_and_real_out_size_info; std::map<vector<int32_t>, int64_t> gear_and_real_out_size_info;
@@ -2227,10 +2226,10 @@ void DavinciModel::CreateOutput(uint32_t index, const OpDescPtr &op_desc, InputO
dims[i] = shape.GetDim(i); dims[i] = shape.GetDim(i);
} }
} else { // FOR FORMAT_NHWC or FORMAT_NCHW } else { // FOR FORMAT_NHWC or FORMAT_NCHW
dims[0] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_N : NCHW_DIM_N); // 0: first dim
dims[1] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_C : NCHW_DIM_C); // 1: second dim
dims[2] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_H : NCHW_DIM_H); // 2: third dim
dims[3] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_W : NCHW_DIM_W); // 3: forth dim
dims[0] = shape.GetDim((format == FORMAT_NHWC) ? NHWC_DIM_N : NCHW_DIM_N); // 0: first dim
dims[1] = shape.GetDim((format == FORMAT_NHWC) ? NHWC_DIM_C : NCHW_DIM_C); // 1: second dim
dims[2] = shape.GetDim((format == FORMAT_NHWC) ? NHWC_DIM_H : NCHW_DIM_H); // 2: third dim
dims[3] = shape.GetDim((format == FORMAT_NHWC) ? NHWC_DIM_W : NCHW_DIM_W); // 3: forth dim
} }
output.shape_info.num = dims[0]; // 0: first dim output.shape_info.num = dims[0]; // 0: first dim
output.shape_info.channel = dims[1]; // 1: second dim output.shape_info.channel = dims[1]; // 1: second dim
@@ -2741,7 +2740,7 @@ Status DavinciModel::ReturnResult(uint32_t data_id, const bool rslt_flg, const b
} }


if (!has_output_node_) { if (!has_output_node_) {
GELOGW("Output tensor list is empty, model id: %u", model_id_);
GELOGW("The tensor list of output is empty, model id: %u", model_id_);
GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, INTERNAL_ERROR, outputs), GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, INTERNAL_ERROR, outputs),
"[Call][OnComputeDone] failed, model_id:%u, data_id:%u.", model_id_, data_id); "[Call][OnComputeDone] failed, model_id:%u, data_id:%u.", model_id_, data_id);
return INTERNAL_ERROR; return INTERNAL_ERROR;
@@ -3071,7 +3070,7 @@ Status DavinciModel::CreateKnownZeroCopyMap(const vector<void *> &inputs, const
GELOGI("output %zu, v addr %p, r addr %p, p addr %p", i, addr_list[i], addr, outputs[i]); GELOGI("output %zu, v addr %p, r addr %p, p addr %p", i, addr_list[i], addr, outputs[i]);
} }


GELOGI("success, known input data info size: %zu, known output data info size: %zu",
GELOGI("create map for zero copy success, known input data info size: %zu, known output data info size: %zu",
known_input_data_info_.size(), known_output_data_info_.size()); known_input_data_info_.size(), known_output_data_info_.size());
return SUCCESS; return SUCCESS;
} }
@@ -3106,12 +3105,12 @@ Status DavinciModel::UpdateKnownZeroCopyAddr(vector<void *> &total_io_addrs, boo
total_io_addrs[i] = known_output_data_info_.at(total_io_addrs[i]); total_io_addrs[i] = known_output_data_info_.at(total_io_addrs[i]);
} }
} }
GELOGI("success, total io addrs size: %zu", total_io_addrs.size());
GELOGI("update known zero copy addr success, total io addrs size: %zu", total_io_addrs.size());
return SUCCESS; return SUCCESS;
} }


Status DavinciModel::UpdateKnownNodeArgs(const vector<void *> &inputs, const vector<void *> &outputs) { Status DavinciModel::UpdateKnownNodeArgs(const vector<void *> &inputs, const vector<void *> &outputs) {
GELOGI("DavinciModel::UpdateKnownNodeArgs in");
GELOGI("DavinciModel::UpdateKnownNodeArgs begin");
GE_CHK_STATUS_RET(CreateKnownZeroCopyMap(inputs, outputs), GE_CHK_STATUS_RET(CreateKnownZeroCopyMap(inputs, outputs),
"[Call][CreateKnownZeroCopyMap] failed, model_id:%u.", model_id_); "[Call][CreateKnownZeroCopyMap] failed, model_id:%u.", model_id_);
total_io_addrs_.clear(); total_io_addrs_.clear();
@@ -3683,6 +3682,7 @@ Status DavinciModel::InitConstant(const OpDescPtr &op_desc) {
elem_num = 1; elem_num = 1;
} }
uint64_t *buff = reinterpret_cast<uint64_t *>(tensor->MutableData().data()); uint64_t *buff = reinterpret_cast<uint64_t *>(tensor->MutableData().data());
GE_CHECK_NOTNULL(buff);
if (ge::CheckInt64Uint32MulOverflow(elem_num, kBytes * kStringHeadElems) != SUCCESS) { if (ge::CheckInt64Uint32MulOverflow(elem_num, kBytes * kStringHeadElems) != SUCCESS) {
GELOGE(FAILED, "[Call][CheckInt64Uint32MulOverflow] Shape size:%ld is invalid", elem_num); GELOGE(FAILED, "[Call][CheckInt64Uint32MulOverflow] Shape size:%ld is invalid", elem_num);
return FAILED; return FAILED;


+ 1
- 1
ge/hybrid/executor/worker/execution_engine.cc View File

@@ -428,7 +428,7 @@ Status ExecutionEngine::ValidateInputTensors(const NodeState &node_state, const
continue; continue;
} }


int64_t expected_size;
int64_t expected_size = 0;
(void)TensorUtils::GetSize(*tensor_desc, expected_size); (void)TensorUtils::GetSize(*tensor_desc, expected_size);
GELOGD("[%s] Input[%d] expects [%ld] bytes.", task_context.GetNodeName(), i, expected_size); GELOGD("[%s] Input[%d] expects [%ld] bytes.", task_context.GetNodeName(), i, expected_size);
auto size_diff = expected_size - static_cast<int64_t>(input_tensor->GetSize()); auto size_diff = expected_size - static_cast<int64_t>(input_tensor->GetSize());


+ 2
- 0
ge/hybrid/model/hybrid_model_builder.cc View File

@@ -900,6 +900,7 @@ Status HybridModelBuilder::LoadGraph() {
GE_CHECK_NOTNULL(node_item); GE_CHECK_NOTNULL(node_item);
AscendString graph_name; AscendString graph_name;
GE_CHK_GRAPH_STATUS_RET(it.second->GetGraph().GetName(graph_name), "Failed to get subgraph name"); GE_CHK_GRAPH_STATUS_RET(it.second->GetGraph().GetName(graph_name), "Failed to get subgraph name");
GE_CHECK_NOTNULL(graph_name.GetString());
auto subgraph = hybrid_model_.GetRootGraph()->GetSubgraph(graph_name.GetString()); auto subgraph = hybrid_model_.GetRootGraph()->GetSubgraph(graph_name.GetString());
GE_CHECK_NOTNULL(subgraph); GE_CHECK_NOTNULL(subgraph);
GE_CHK_STATUS_RET(IdentifyVariableOutputs(*node_item, subgraph), GE_CHK_STATUS_RET(IdentifyVariableOutputs(*node_item, subgraph),
@@ -967,6 +968,7 @@ Status HybridModelBuilder::HandleDtString(const GeTensor &tensor, void *var_addr


auto &mutable_tensor = const_cast<GeTensor &>(tensor); auto &mutable_tensor = const_cast<GeTensor &>(tensor);
uint64_t *buff = reinterpret_cast<uint64_t *>(mutable_tensor.MutableData().data()); uint64_t *buff = reinterpret_cast<uint64_t *>(mutable_tensor.MutableData().data());
GE_CHECK_NOTNULL(buff);
GE_CHK_BOOL_RET_STATUS(ge::CheckInt64Uint32MulOverflow(elem_num, kBytes * kStringHeadElems) == SUCCESS, FAILED, GE_CHK_BOOL_RET_STATUS(ge::CheckInt64Uint32MulOverflow(elem_num, kBytes * kStringHeadElems) == SUCCESS, FAILED,
"[Invoke][CheckInt64Uint32MulOverflow] failed because Shape size is invalid."); "[Invoke][CheckInt64Uint32MulOverflow] failed because Shape size is invalid.");
auto offset = static_cast<uint64_t>(elem_num * kBytes * kStringHeadElems); auto offset = static_cast<uint64_t>(elem_num * kBytes * kStringHeadElems);


+ 2
- 1
ge/hybrid/node_executor/hccl/hccl_node_executor.cc View File

@@ -417,7 +417,7 @@ Status BuildGatherAllToAllParams(TaskContext &context, HcomGatherAllToAllVParams
} }
params.recvtype = iter->second; params.recvtype = iter->second;


int64_t addr_len;
int64_t addr_len = 0;
(void) ge::AttrUtils::GetInt(op_desc, "addr_length", addr_len); (void) ge::AttrUtils::GetInt(op_desc, "addr_length", addr_len);
params.addrLength = static_cast<int>(addr_len); params.addrLength = static_cast<int>(addr_len);


@@ -460,6 +460,7 @@ Status AllToAllNodeTask::ExecuteAsync(TaskContext &context, std::function<void()
return FAILED; return FAILED;
} }
HcomGatherAllToAllVParams params; HcomGatherAllToAllVParams params;
params.group = nullptr;
GE_CHK_STATUS_RET(BuildGatherAllToAllParams(context, params)); GE_CHK_STATUS_RET(BuildGatherAllToAllParams(context, params));
HcclResult hccl_ret = HcomExecEnqueueGatherAllToAllV(params, callback); HcclResult hccl_ret = HcomExecEnqueueGatherAllToAllV(params, callback);
if (hccl_ret != HCCL_SUCCESS) { if (hccl_ret != HCCL_SUCCESS) {


+ 1
- 0
ge/ir_build/ge_ir_build.cc View File

@@ -867,6 +867,7 @@ graphStatus aclgrphDumpGraph(const ge::Graph &graph, const char *file, const siz
graphStatus aclgrphGenerateForOp(const AscendString &op_type, const vector<TensorDesc> &inputs, graphStatus aclgrphGenerateForOp(const AscendString &op_type, const vector<TensorDesc> &inputs,
const vector<TensorDesc> &outputs, Graph &graph) { const vector<TensorDesc> &outputs, Graph &graph) {
ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
GE_CHECK_NOTNULL(op_type.GetString());
auto op_type_str = std::string(op_type.GetString()); auto op_type_str = std::string(op_type.GetString());
auto op_name = op_type_str + "_" + std::to_string(ge::GetCurrentTimestamp()); auto op_name = op_type_str + "_" + std::to_string(ge::GetCurrentTimestamp());
auto op_desc = ge::MakeShared<ge::OpDesc>(op_name, op_type_str); auto op_desc = ge::MakeShared<ge::OpDesc>(op_name, op_type_str);


Loading…
Cancel
Save