From 88b73dc98c3e2ce782b3b4b0f9bf86523d5807df Mon Sep 17 00:00:00 2001 From: wangxiaotian22 Date: Mon, 29 Mar 2021 11:39:01 +0800 Subject: [PATCH] add error msg --- ge/graph/load/model_manager/davinci_model.cc | 221 ++++++++++++++++++- ge/graph/manager/graph_caching_allocator.cc | 15 ++ 2 files changed, 233 insertions(+), 3 deletions(-) diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index 645d1f35..ed6e738d 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -67,6 +67,8 @@ try { \ thread_id = std::thread(func, args); \ } catch (const std::system_error &e) { \ + REPORT_CALL_ERROR("E19999", "Create thread fail, ecode:%d, emsg:%s, when DavinciModel %s", \ + e.code().value(), e.what(), __FUNCTION__); \ GELOGE(FAILED, "Caught system_error with code:%d, meaning:%s", e.code().value(), e.what()); \ GELOGE(FAILED, "Thread creat FAIL, Please check the left resource!"); \ return FAILED; \ @@ -329,6 +331,8 @@ void DavinciModel::Shrink() { Status DavinciModel::InitWeightMem(void *dev_ptr, void *weight_ptr, size_t weight_size) { if (is_weight_mem_has_inited_) { + REPORT_INNER_ERROR("E19999", "Call InitWeightMem more than once, model_id:%u, check invalid when %s", + model_id_, __FUNCTION__); GELOGE(FAILED, "call InitWeightMem more than once."); return FAILED; } @@ -339,6 +343,8 @@ Status DavinciModel::InitWeightMem(void *dev_ptr, void *weight_ptr, size_t weigh GE_CHECK_LE(weights_size, ALLOC_MEMORY_MAX_SIZE); if ((weight_ptr != nullptr) && (weight_size < weights_size)) { + REPORT_INNER_ERROR("E19999", "Param weight_ptr is nullptr or ge_model.weight.size:%zu < param weights_size:%zu, " + "model_id:%u, check invalid when %s", weight_size, weights_size, model_id_, __FUNCTION__); GELOGE(FAILED, "Invalid mem param: weight_size=%zu totalsize=%zu.", weight_size, weights_size); return FAILED; } @@ -352,6 +358,8 @@ Status DavinciModel::InitWeightMem(void *dev_ptr, void *weight_ptr, size_t weigh if (weight_ptr == nullptr) { weights_mem_base_ = MallocWeightsMem(weights_size); if (weights_mem_base_ == nullptr) { + REPORT_CALL_ERROR("E19999", "MallocWeightsMem fail, weights_size:%zu, model_id:%u, check invalid when %s", + weights_size, model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc weight memory failed. 
size: %zu", weights_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; } @@ -370,6 +378,8 @@ Status DavinciModel::InitWeightMem(void *dev_ptr, void *weight_ptr, size_t weigh Status DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) { if (is_feature_map_mem_has_inited_) { + REPORT_INNER_ERROR("E19999", "Call InitFeatureMapMem more than once, model_id:%u, check invalid when %s", + model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "call InitFeatureMapMem more than once"); return PARAM_INVALID; } @@ -379,6 +389,8 @@ Status DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) { std::size_t p2p_data_size = P2PMemInfos().at(RT_MEMORY_P2P_DDR).memory_size; if ((dev_ptr != nullptr) && (mem_size < TotalMemSize())) { + REPORT_INNER_ERROR("E19999", "Param dev_ptr is nullptr or mem_size:%zu < ge_model.mem_size:%zu, " + "model_id:%u, check invalid when %s", mem_size, TotalMemSize(), model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "Invalid mem param: mem_size=%zu totalsize=%zu.", mem_size, TotalMemSize()); return PARAM_INVALID; } @@ -390,6 +402,8 @@ Status DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) { if (TotalMemSize() && mem_base_ == nullptr) { mem_base_ = MallocFeatureMapMem(data_size); if (mem_base_ == nullptr) { + REPORT_CALL_ERROR("E19999", "MallocFeatureMapMem fail, data_size:%zu, model_id:%u, check invalid when %s", + data_size, model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc feature map memory failed. size: %zu", data_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; } @@ -406,6 +420,8 @@ Status DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) { if (p2p_data_size != 0) { p2p_mem_base_ = MallocP2PMem(p2p_data_size); if (p2p_mem_base_ == nullptr) { + REPORT_CALL_ERROR("E19999", "MallocFeatureMapMem fail, p2p_data_size:%zu, model_id:%u, check invalid when %s", + p2p_data_size, model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc p2p memory failed,size: %zu", p2p_data_size); return ACL_ERROR_GE_MEMORY_ALLOCATION; } @@ -427,6 +443,8 @@ Status DavinciModel::InitVariableMem() { if (TotalVarMemSize() && (var_mem_base_ == nullptr)) { Status ret = VarManager::Instance(session_id_)->MallocVarMemory(TotalVarMemSize()); if (ret != SUCCESS) { + REPORT_CALL_ERROR("E19999", "MallocVarMemory fail, var_size:%zu, model_id:%u, check invalid when %s", + TotalVarMemSize(), model_id_, __FUNCTION__); GELOGE(ret, "Malloc variable memory failed."); return ret; } @@ -567,6 +585,8 @@ Status DavinciModel::SetTSDevice() { GELOGD("SetTSDevice: %u.", core_type); rtError_t rt_ret = rtSetTSDevice(core_type); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtSetTSDevice failed, core_type:%u, model_id:%u, when DavinciModel %s", + core_type, model_id_, __FUNCTION__); GELOGE(RT_FAILED, "SetTSDevice failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -575,7 +595,7 @@ Status DavinciModel::SetTSDevice() { Status DavinciModel::OpDebugRegister() { if (GetDumpProperties().IsOpDebugOpen()) { - uint32_t op_debug_mode = GetDumpProperties().GetOpDebugMode(); + uint32_t op_debug_mode = GetDumpProperties().GetOpDebugMode(); auto ret = opdebug_register_.RegisterDebugForModel(rt_model_handle_, op_debug_mode, data_dumper_); if (ret != SUCCESS) { GELOGE(ret,"Register known shape op debug failed, ret: 0x%X",ret); @@ -885,6 +905,8 @@ Status DavinciModel::InitNodes(const ComputeGraphPtr &compute_graph) { rtError_t rt_ret = rtMemcpy(addr, size, tensor_device_addrs.data(), size, 
RT_MEMCPY_HOST_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%zu, ret: 0x%X when DavinciModel %s", + size, rt_ret, __FUNCTION__); GELOGE(RT_FAILED, "rtMemcpy error, ret: 0x%X", rt_ret); GE_CHK_RT(rtFree(addr)); return RT_ERROR_TO_GE_STATUS(rt_ret); @@ -959,6 +981,11 @@ Status DavinciModel::InitDataOp(const ComputeGraphPtr &graph, const NodePtr &nod const vector output_offset_list = op_desc->GetOutputOffset(); if (output_size_list.empty() || virtual_addr_list.empty() || (output_size_list.size() != virtual_addr_list.size()) || (output_offset_list.size() != virtual_addr_list.size())) { + REPORT_INNER_ERROR( + "E19999", "Check data fail in op:%s(%s), output_desc size:%zu output addr size:%zu output offset size:%zu " + "not equal or has empty, model_id:%u, when DavinciModel %s", + op_desc->GetName().c_str(), op_desc->GetType().c_str(), + output_size_list.size(), virtual_addr_list.size(), output_offset_list.size(), model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "Data[%s] init failed: output size is %zu, virtual_addr size is %zu, offset size is %zu.", op_desc->GetName().c_str(), output_size_list.size(), virtual_addr_list.size(), output_offset_list.size()); return PARAM_INVALID; @@ -1076,12 +1103,22 @@ Status DavinciModel::InitNetOutput(const ComputeGraphPtr &graph, const NodePtr & const vector virtual_addr_list = ModelUtils::GetInputDataAddrs(runtime_param_, op_desc); const vector input_offset_list = op_desc->GetInputOffset(); GE_IF_BOOL_EXEC(input_offset_list.size() != virtual_addr_list.size(), - GELOGE(PARAM_INVALID, "virtual_addr size should be equal to offset size."); return PARAM_INVALID;); + REPORT_INNER_ERROR( + "E19999", "Check data fail in op:%s(%s), input addr size:%zu input offset size:%zu " + "not equal, model_id:%u, when DavinciModel %s", + op_desc->GetName().c_str(), op_desc->GetType().c_str(), + virtual_addr_list.size(), input_offset_list.size(), model_id_, __FUNCTION__); + GELOGE(PARAM_INVALID, "virtual_addr size should be equal to offset size."); + return PARAM_INVALID;); if (input_size_list.empty() && virtual_addr_list.empty()) { GELOGI("NetOutput[%s] is empty.", op_desc->GetName().c_str()); return SUCCESS; } if (input_size_list.empty() || input_size_list.size() != virtual_addr_list.size()) { + REPORT_INNER_ERROR( + "E19999", "Check data fail in op:%s(%s), input_desc size:%zu input addr size:%zu not equal or has empty, " + "model_id:%u, when DavinciModel %s", op_desc->GetName().c_str(), op_desc->GetType().c_str(), + input_size_list.size(), virtual_addr_list.size(), model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "NetOutput[%s] init failed: Input size is %zu, Input addr is %zu", op_desc->GetName().c_str(), input_size_list.size(), virtual_addr_list.size()); return PARAM_INVALID; @@ -1179,6 +1216,9 @@ Status DavinciModel::GetGetDynamicDimsNodeInfo(const NodePtr &node) { auto in_anchor = node->GetAllInDataAnchors().at(get_dynamic_dims_index); auto peer_out_anchor = in_anchor->GetPeerOutAnchor(); if (peer_out_anchor == nullptr) { + REPORT_INNER_ERROR("E19999", "In anchor index:%zu in op:%s(%s) peer anchor is nullptr, model_id:%u, check invalid " + "when DavinciModel %s", get_dynamic_dims_index, + node->GetName().c_str(), node->GetType().c_str(), model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "Out anchor of getdynmaicdims node should not be nullptr."); return PARAM_INVALID; } @@ -1190,12 +1230,18 @@ Status DavinciModel::GetGetDynamicDimsNodeInfo(const NodePtr &node) { auto input_addr = 
ModelUtils::GetInputDataAddrs(runtime_param_, node->GetOpDesc()); auto input_size = ModelUtils::GetInputSize(node->GetOpDesc()); if (input_addr.empty() || input_size.empty()) { + REPORT_INNER_ERROR("E19999", "input_addr size:%zu or input_length size:%zu in op:%s(%s) has empty, model_id:%u " + "check invalid when DavinciModel %s", input_addr.size(), input_size.size(), + node->GetName().c_str(), node->GetType().c_str(), model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "Not set output of %s", op_desc->GetName().c_str()); return PARAM_INVALID; } auto input_desc = node->GetOpDesc()->GetInputDescPtr(get_dynamic_dims_index); GE_CHECK_NOTNULL(input_desc); if (input_desc->GetShape().GetDims().empty()) { + REPORT_INNER_ERROR("E19999", "input_desc_index:%zu in op:%s(%s) shape dim is empty, model_id:%u, check invalid " + "when DavinciModel %s", get_dynamic_dims_index, + node->GetName().c_str(), node->GetType().c_str(), model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "Not set output desc shape of %s.", op_desc->GetName().c_str()); return PARAM_INVALID; } @@ -1240,6 +1286,8 @@ Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_ for (const auto &name : func_desc->GetSubgraphInstanceNames()) { const auto &subgraph = graph->GetSubgraph(name); if (subgraph == nullptr) { + REPORT_INNER_ERROR("E19999", "Get name:%s subgraph in graph:%s fail, model_id:%u, check invalid " + "when DavinciModel %s", name.c_str(), graph->GetName().c_str(), model_id_, __FUNCTION__); GELOGE(GE_GRAPH_EMPTY_SUBGRAPH, "Subgraph not found, name: %s.", name.c_str()); return GE_GRAPH_EMPTY_SUBGRAPH; } @@ -1252,6 +1300,10 @@ Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_ size_t batch_index = static_cast(stoi(batch_label.substr(batch_label.rfind('_') + 1))); GELOGD("Batch index of %s is %zu.", op_desc->GetName().c_str(), batch_index); if (batch_index > all_gears_info_.size()) { + REPORT_INNER_ERROR("E19999", "Batch_index:%zu in op:%s(%s) > all_gears_info.size:%zu, model_id:%u, " + "check invalid when DavinciModel %s", batch_index, + op_desc->GetName().c_str(), op_desc->GetType().c_str(), + all_gears_info_.size(), model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "The value of ATTR_NAME_BATCH_LABEL is invalid."); return PARAM_INVALID; } @@ -1261,6 +1313,9 @@ Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_ GE_CHECK_NOTNULL(tensor_desc); int64_t data_size = 0; if (TensorUtils::GetTensorSizeInBytes(*tensor_desc, data_size) != GRAPH_SUCCESS) { + REPORT_INNER_ERROR("E19999", "Get input TensorSize in op:%s(%s) failed, input_index:%zu, model_id:%u" + "when DavinciModel %s", op_desc->GetName().c_str(), op_desc->GetType().c_str(), + input_index, model_id_, __FUNCTION__); GELOGE(FAILED, "Get tensor size in bytes failed."); return FAILED; } @@ -1302,6 +1357,9 @@ Status DavinciModel::GetGearAndRealOutShapeInfo(const ComputeGraphPtr &graph, co for (auto &it : dynamic_output_shape) { auto gear_index = static_cast(it[0]); if (gear_index > all_gears_info_.size()) { + REPORT_INNER_ERROR("E19999", "gear index:%zu in op:%s(%s) > all_gears_info.size:%zu in model:%u check invalid" + "when DavinciModel %s", gear_index, op_desc->GetName().c_str(), op_desc->GetType().c_str(), + all_gears_info_.size(), model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "The value of cur index: %zu is invalid.", static_cast(it[0])); return PARAM_INVALID; } @@ -1350,6 +1408,8 @@ Status DavinciModel::GetLabelGotoAddr(uint32_t label_index, rtMemType_t mem_type } if (label_index >= 
label_list_.size()) { + REPORT_INNER_ERROR("E19999", "Param label index:%u >= label_list_.size:%zu in model:%u, check invalid" + "when DavinciModel %s", label_index, label_list_.size(), model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "Invalid label id:%u, label size:%zu", label_index, label_list_.size()); return INTERNAL_ERROR; } @@ -1359,6 +1419,8 @@ Status DavinciModel::GetLabelGotoAddr(uint32_t label_index, rtMemType_t mem_type arg_size = label_used.size() * sizeof(rtLabelDevInfo); rtError_t rt_ret = rtMalloc(&arg_addr, arg_size, mem_type); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret: 0x%X when DavinciModel %s", + arg_size, rt_ret, __FUNCTION__); GELOGE(RT_FAILED, "Call rtMalloc failed, error: %#x", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -1366,6 +1428,7 @@ Status DavinciModel::GetLabelGotoAddr(uint32_t label_index, rtMemType_t mem_type label_goto_args_[label_index] = { arg_addr, arg_size }; rt_ret = rtLabelListCpy(label_used.data(), label_used.size(), arg_addr, arg_size); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtLabelListCpy failed, ret: 0x%X when DavinciModel %s", rt_ret, __FUNCTION__); GELOGE(RT_FAILED, "Call rtLabelListCpy failed, error: %#x", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -1380,15 +1443,24 @@ Status DavinciModel::GetLabelGotoAddr(uint32_t label_index, rtMemType_t mem_type Status DavinciModel::InitLabelSet(const OpDescPtr &op_desc) { uint32_t label_index = 0; if (!AttrUtils::GetInt(op_desc, ATTR_NAME_LABEL_SWITCH_INDEX, label_index)) { + REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail, model_id:%u, check invalid" + "when DavinciModel %s", ATTR_NAME_LABEL_SWITCH_INDEX.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "InitLabelSet: %s attr [%s] not exist.", op_desc->GetName().c_str(), ATTR_NAME_LABEL_SWITCH_INDEX.c_str()); return INTERNAL_ERROR; } if (label_index >= LabelNum()) { + REPORT_INNER_ERROR("E19999", "label_switch_index:%u in op:%s(%s) >= label_num:%u in model:%u, check invalid" + "when DavinciModel %s", label_index, op_desc->GetName().c_str(), op_desc->GetType().c_str(), + LabelNum(), model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "InitLabelSet: label index: %u >= label size: %u.", label_index, LabelNum()); return INTERNAL_ERROR; } if (label_id_indication_.count(label_index) > 0) { + REPORT_INNER_ERROR("E19999", "label_switch_index:%u in op:%s(%s) is already used in model:%u, check invalid" + "when DavinciModel %s", label_index, op_desc->GetName().c_str(), op_desc->GetType().c_str(), + model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "InitLabelSet: %s label index: %u already used.", op_desc->GetName().c_str(), label_index); return INTERNAL_ERROR; } @@ -1400,6 +1472,9 @@ Status DavinciModel::InitLabelSet(const OpDescPtr &op_desc) { } else if (stream_list_.size() > stream_id) { stream = stream_list_[stream_id]; } else { + REPORT_INNER_ERROR("E19999", "stream_id:%u in op:%s(%s) >= stream size:%zu in model:%u, check invalid" + "when DavinciModel %s", stream_id, op_desc->GetName().c_str(), op_desc->GetType().c_str(), + stream_list_.size(), model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "InitLabelSet: stream index: %u >= stream size: %zu.", stream_id, stream_list_.size()); return INTERNAL_ERROR; } @@ -1407,6 +1482,7 @@ Status DavinciModel::InitLabelSet(const OpDescPtr &op_desc) { rtLabel_t rt_label = nullptr; rtError_t rt_error = rtLabelCreateExV2(&rt_label, rt_model_handle_, stream); 
if (rt_error != RT_ERROR_NONE || rt_label == nullptr) { + REPORT_CALL_ERROR("E19999", "Call rtLabelCreateExV2 failed, ret: 0x%X when DavinciModel %s", rt_error, __FUNCTION__); GELOGE(INTERNAL_ERROR, "InitLabelSet: %s create label failed, error=0x%x.", op_desc->GetName().c_str(), rt_error); return INTERNAL_ERROR; } @@ -1445,6 +1521,9 @@ Status DavinciModel::InitVariable(const OpDescPtr &op_desc, map &input_queue_ids, const std::vector &output_queue_ids) { if (input_queue_ids.empty() && output_queue_ids.empty()) { + REPORT_INNER_ERROR("E19999", "Param input_queue_ids.size:%zu or output_queue_ids.size:%zu is empty, model_id:%u," + "check invalid when DavinciModel %s", input_queue_ids.size(), output_queue_ids.size(), + model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Param is empty"); return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID; } @@ -1467,12 +1546,18 @@ Status DavinciModel::LoadWithQueue() { } if (input_queue_ids_.size() != input_data_info_.size()) { + REPORT_INNER_ERROR("E19999", "Param input_queue_ids_.size:%zu != input_data_info_.size:%zu, model_id:%u," + "check invalid when DavinciModel %s", input_queue_ids_.size(), input_data_info_.size(), + model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Input queue ids not match model: input_queue=%zu input_data=%zu", input_queue_ids_.size(), input_data_info_.size()); return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID; } if (output_queue_ids_.size() != output_data_info_.size()) { + REPORT_INNER_ERROR("E19999", "Param output_queue_ids_.size:%zu != output_data_info_.size:%zu, model_id:%u," + "check invalid when DavinciModel %s", output_queue_ids_.size(), output_data_info_.size(), + model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Output queue ids not match model: output_queue=%zu output_data=%zu", output_queue_ids_.size(), output_data_info_.size()); @@ -1504,6 +1589,7 @@ Status DavinciModel::BindInputQueue() { for (size_t i = 0; i < input_queue_ids_.size(); ++i) { auto it = input_data_info_.find(i); if (it == input_data_info_.end()) { + GELOGE(FAILED, "Input not match: tensor num=%zu, Queue id index=%zu", input_data_info_.size(), i); return FAILED; } @@ -1520,6 +1606,7 @@ Status DavinciModel::BindInputQueue() { rtError_t rt_ret = rtModelBindQueue(rt_model_handle_, queue_id, RT_MODEL_INPUT_QUEUE); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtModelBindQueue failed, ret: 0x%X when DavinciModel %s", rt_ret, __FUNCTION__); GELOGE(RT_FAILED, "Call rtModelBindQueue failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -1540,6 +1627,8 @@ Status DavinciModel::CpuModelDequeue(uint32_t queue_id) { GELOGI("Set CpuKernel model dequeue task enter."); std::shared_ptr dequeue_task = MakeShared(rt_entry_stream_); if (dequeue_task == nullptr) { + REPORT_INNER_ERROR("E19999", "New CpuTaskModelDequeue failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(MEMALLOC_FAILED, "Make CpuTaskModelDequeue task failed."); return MEMALLOC_FAILED; } @@ -1562,6 +1651,8 @@ Status DavinciModel::CpuTaskModelZeroCopy(std::vector &mbuf_list, GELOGI("Set CpuKernel model zero_copy task enter."); std::shared_ptr zero_copy = MakeShared(rt_entry_stream_); if (zero_copy == nullptr) { + REPORT_INNER_ERROR("E19999", "New CpuTaskZeroCopy failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(MEMALLOC_FAILED, "Make CpuTaskZeroCopy task failed."); return MEMALLOC_FAILED; } @@ -1584,12 +1675,16 @@ Status 
DavinciModel::BindOutputQueue() { for (size_t i = 0; i < output_queue_ids_.size(); ++i) { auto it = output_data_info_.find(i); if (it == output_data_info_.end()) { + REPORT_INNER_ERROR("E19999", "Index:%zu can't find in output_data_info_ size:%zu in model_id:%u, check invalid " + "when DavinciModel %s", i, output_data_info_.size(), model_id_, __FUNCTION__); GELOGE(FAILED, "Output not match: tensor num=%zu, Queue id index=%zu", output_data_info_.size(), i); return FAILED; } uint32_t queue_id = output_queue_ids_[i]; if (it->second.GetDataInfo().empty()) { + REPORT_INNER_ERROR("E19999", "Index:%zu out_data_info in model:%u is empty, check invalid " + "when DavinciModel %s", i, model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "the %zu output_queue not set data_info.", i); return INTERNAL_ERROR; } @@ -1600,6 +1695,8 @@ Status DavinciModel::BindOutputQueue() { rtError_t rt_ret = rtModelBindQueue(rt_model_handle_, queue_id, RT_MODEL_OUTPUT_QUEUE); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtModelBindQueue failed, queue_id:%u, ret: 0x%X when DavinciModel %s", + queue_id, rt_ret, __FUNCTION__); GELOGE(RT_FAILED, "Call rtModelBindQueue failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -1621,12 +1718,16 @@ Status DavinciModel::BindOutputQueue() { Status DavinciModel::CpuModelPrepareOutput(uintptr_t addr, uint32_t size) { GELOGI("Set CpuKernel model enqueue task enter."); if (input_mbuf_list_.empty()) { + REPORT_INNER_ERROR("E19999", "input_mbuf_list_ is empty, model_id:%u, check invalid when %s", + model_id_, __FUNCTION__); GELOGE(FAILED, "Need input mbuf for fill output mbuf head info."); return FAILED; } std::shared_ptr prepare_output = MakeShared(rt_entry_stream_); if (prepare_output == nullptr) { + REPORT_INNER_ERROR("E19999", "New CpuTaskPrepareOutput failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(MEMALLOC_FAILED, "Make CpuTaskPrepareOutput task failed."); return MEMALLOC_FAILED; } @@ -1651,6 +1752,8 @@ Status DavinciModel::CpuActiveStream() { GELOGI("Set CpuKernel active stream task enter."); std::shared_ptr active_entry = MakeShared(rt_entry_stream_); if (active_entry == nullptr) { + REPORT_INNER_ERROR("E19999", "New CpuTaskActiveEntry failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(MEMALLOC_FAILED, "Make CpuTaskActiveEntry task failed."); return MEMALLOC_FAILED; } @@ -1672,6 +1775,8 @@ Status DavinciModel::CpuWaitEndGraph() { GELOGI("Set CpuKernel wait end graph task enter."); std::shared_ptr wait_endgraph = MakeShared(rt_entry_stream_); if (wait_endgraph == nullptr) { + REPORT_INNER_ERROR("E19999", "New CpuTaskWaitEndGraph failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(MEMALLOC_FAILED, "Make CpuTaskWaitEndGraph task failed."); return MEMALLOC_FAILED; } @@ -1690,6 +1795,8 @@ Status DavinciModel::BindEnqueue() { for (size_t i = 0; i < output_queue_ids_.size(); ++i) { auto it = output_data_info_.find(i); if (it == output_data_info_.end()) { + REPORT_INNER_ERROR("E19999", "Index:%zu can't find in output_data_info_ size:%zu in model_id:%u, check invalid " + "when DavinciModel %s", i, output_data_info_.size(), model_id_, __FUNCTION__); GELOGE(FAILED, "Output not match: tensor num=%zu, Queue id index=%zu", output_data_info_.size(), i); return FAILED; } @@ -1706,6 +1813,8 @@ Status DavinciModel::CpuModelEnqueue(uint32_t queue_id, uintptr_t out_mbuf) { GELOGI("Set CpuKernel model enqueue task enter."); std::shared_ptr model_enqueue = 
MakeShared(rt_entry_stream_); if (model_enqueue == nullptr) { + REPORT_INNER_ERROR("E19999", "New CpuTaskModelEnqueue failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(MEMALLOC_FAILED, "Make CpuTaskModelEnqueue task failed."); return MEMALLOC_FAILED; } @@ -1726,6 +1835,8 @@ Status DavinciModel::CpuModelRepeat() { GELOGI("Set CpuKernel repeat task enter."); std::shared_ptr model_repeat = MakeShared(rt_entry_stream_); if (model_repeat == nullptr) { + REPORT_INNER_ERROR("E19999", "New CpuTaskModelRepeat failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(MEMALLOC_FAILED, "Make CpuTaskModelRepeat task failed."); return MEMALLOC_FAILED; } @@ -1759,6 +1870,8 @@ Status DavinciModel::GetInputOutputDescInfo(vector &input_d vector &input_formats, vector &output_formats, bool by_dims) { if (input_addrs_list_.empty() || input_addrs_list_[0].size() != 1) { + REPORT_INNER_ERROR("E19999", "input_addrs_list_ is empty or first member size != 1, model_id:%u, " + "check invalid when DavinciModel %s", model_id_, __FUNCTION__); GELOGE(FAILED, "OP List Pointer is null or input_desc size is not 1!"); return FAILED; } @@ -1869,6 +1982,9 @@ Status DavinciModel::InitAippType(uint32_t index, const OpDescPtr &op_desc, cons } else if (data_mode == "dynamic_aipp_conf") { aipp_type = DYNAMIC_AIPP_NODE; } else { + REPORT_INNER_ERROR("E19999", "Attr:%s data_mode:%s in op:%s(%s), model_id:%u, check invalid when DavinciModel %s", + ATTR_DATA_RELATED_AIPP_MODE.c_str(), data_mode.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID, "The info of aipp releated info %s is invalid with index %u.", data_mode.c_str(), index); return ACL_ERROR_GE_AIPP_MODE_INVALID; @@ -2018,7 +2134,11 @@ Status DavinciModel::GetInputDescInfo(vector &input_descs, void DavinciModel::CreateOutput(uint32_t index, const OpDescPtr &op_desc, InputOutputDescInfo &output, uint32_t &format_result) { /// netoutput input tensor desc - GE_IF_BOOL_EXEC(op_desc->GetInputDescPtr(index) == nullptr, GELOGE(FAILED, "OpDesc GetInputDescPtr is nullptr"); + GE_IF_BOOL_EXEC(op_desc->GetInputDescPtr(index) == nullptr, + REPORT_INNER_ERROR("E19999", "input_desc index:%u in op:%s(%s) not exist, model_id:%u, " + "check invalid when DavinciModel %s", index, + op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_, __FUNCTION__); + GELOGE(FAILED, "OpDesc GetInputDescPtr is nullptr"); return); Format format = op_desc->GetInputDescPtr(index)->GetFormat(); GeShape shape = op_desc->GetInputDescPtr(index)->GetShape(); @@ -2108,6 +2228,8 @@ Status DavinciModel::CopyInputData(const InputData &input_data, bool device_data const std::vector &blobs = input_data.blobs; for (const auto &data : input_data_info_) { if (data.first >= blobs.size()) { + REPORT_INNER_ERROR("E19999", "index:%u in input_data_info_ >= input_data.blobs.size:%zu, model_id:%u, " + "check invalid when DavinciModel %s", data.first, blobs.size(), model_id_, __FUNCTION__); GELOGE(FAILED, "Blobs not match: blobs=%zu, tensor=%zu, index=%u, size=%ld, op_name(%s)", blobs.size(), input_data_info_.size(), data.first, data.second.GetDataInfo().at(0).first, data.second.GetOpName().c_str()); @@ -2257,8 +2379,12 @@ Status DavinciModel::SinkModelProfile() { try { reported_data = model_load_info.dump(kInteval, ' ', false, Json::error_handler_t::ignore); } catch (std::exception &e) { + REPORT_INNER_ERROR("E19999", "Convert model_load_info JSON to string failed, model_id:%u, reason:%s, " + 
"when DavinciModel %s", model_id_, e.what(), __FUNCTION__); GELOGE(FAILED, "Failed to convert JSON to string, reason: %s.", e.what()); } catch (...) { + REPORT_INNER_ERROR("E19999", "Convert model_load_info JSON to string failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(FAILED, "Failed to convert JSON to string."); } reported_data.append(",") @@ -2293,8 +2419,12 @@ Status DavinciModel::SinkTimeProfile(const InputData ¤t_data) { try { reported_data = model_time_info.dump(kInteval, ' ', false, Json::error_handler_t::ignore); } catch (std::exception &e) { + REPORT_INNER_ERROR("E19999", "Convert model_time_info JSON to string failed, model_id:%u, reason:%s, " + "when DavinciModel %s", model_id_, e.what(), __FUNCTION__); GELOGE(FAILED, "Failed to convert JSON to string, reason: %s.", e.what()); } catch (...) { + REPORT_INNER_ERROR("E19999", "Convert model_time_info JSON to string failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(FAILED, "Failed to convert JSON to string."); } reported_data.append(",") @@ -2361,6 +2491,9 @@ Status DavinciModel::CopyOutputData(uint32_t data_id, OutputData &output_data, r output_data.index = data_id; output_data.model_id = model_id_; if (output_data.blobs.size() != output_data_info_.size()) { + REPORT_INNER_ERROR("E19999", "output_data.blobs.size:%zu != output_data_info.size:%zu, model_id:%u, " + "check invalid when DavinciModel %s", + output_data.blobs.size(), output_data_info_.size(), model_id_, __FUNCTION__); GELOGE(FAILED, "Output data buffer num=%zu not equal model data num=%zu", output_data.blobs.size(), output_data_info_.size()); return FAILED; @@ -2370,6 +2503,8 @@ Status DavinciModel::CopyOutputData(uint32_t data_id, OutputData &output_data, r size_t idx = 0; for (const auto &output : output_data_info_) { if (output.first >= blobs.size()) { + REPORT_INNER_ERROR("E19999", "index:%u in output_data_info_ >= output_data.blobs.size:%zu, model_id:%u, " + "check invalid when DavinciModel %s", output.first, blobs.size(), model_id_, __FUNCTION__); GELOGE(FAILED, "Blobs not match: blobs=%zu, tensor=%zu, index=%u, size=%ld", blobs.size(), input_data_info_.size(), output.first, output.second.GetDataInfo().at(0).first); return FAILED; @@ -2388,6 +2523,9 @@ Status DavinciModel::CopyOutputData(uint32_t data_id, OutputData &output_data, r if (is_dynamic_) { GELOGI("No need to check output data size."); } else if (buffer.length < mem_size) { + REPORT_INNER_ERROR("E19999", "Buffer.length:%lu in output blob < mem_size:%lu in output_data_info, index:%u, " + "model_id:%u, check invalid when DavinciModel %s", buffer.length, mem_size, output.first, + model_id_, __FUNCTION__); GELOGE(FAILED, "Tensor data size=%lu, buffer size=%lu", mem_size, buffer.length); return FAILED; } else if (buffer.length > mem_size) { @@ -2424,6 +2562,10 @@ Status DavinciModel::InitOutputTensorInfo(const OpDescPtr &op_desc) { GE_CHECK_NOTNULL(input_desc); auto ret = TensorUtils::GetTensorSizeInBytes(*input_desc, size); GE_IF_BOOL_EXEC(ret != GRAPH_SUCCESS, + REPORT_INNER_ERROR("E19999", "Get input TensorSize in op:%s(%s) failed, input_index:%zu, " + "model_id:%u when DavinciModel %s", + op_desc->GetName().c_str(), op_desc->GetType().c_str(), i, + model_id_, __FUNCTION__); GELOGE(ret, "Get size from TensorDesc failed, op:%s, input id:%zu", op_desc->GetName().c_str(), i); return ret); const GeShape &shape = input_desc->GetShape(); @@ -2466,6 +2608,8 @@ Status DavinciModel::GenOutputTensorInfo(OutputData *output_data, vector data_buf(new 
(std::nothrow) uint8_t[output_buffer_size[i]]); if (data_buf == nullptr) { + REPORT_INNER_ERROR("E19999", "New buffer failed, size:%ld, model_id:%u when DavinciModel %s", + output_buffer_size[i], model_id_, __FUNCTION__); GELOGE(GE_GRAPH_MALLOC_FAILED, "Malloc buffer failed."); return GE_GRAPH_MALLOC_FAILED; } @@ -2577,6 +2721,7 @@ void *DavinciModel::Run(DavinciModel *model) { GELOGI("Model Run thread start, model_id:%u.", model_id); rtError_t rt_ret = rtSetDevice(static_cast(device_id)); if (rt_ret != RT_ERROR_NONE) { + GELOGE(FAILED, "Model run rtsetdevice failed."); return nullptr; } @@ -2953,6 +3098,8 @@ Status DavinciModel::MallocKnownArgs() { if (total_args_size_ != 0) { rt_ret = rtMalloc(&args_, total_args_size_, RT_MEMORY_HBM); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret: 0x%X when DavinciModel %s", + total_args_size_, rt_ret, __FUNCTION__); GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -2961,6 +3108,8 @@ if (total_hybrid_args_size_ != 0) { rt_ret = rtMalloc(&hybrid_addrs_, total_hybrid_args_size_, RT_MEMORY_HBM); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret: 0x%X when DavinciModel %s", + total_hybrid_args_size_, rt_ret, __FUNCTION__); GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -2970,6 +3119,8 @@ GELOGI("Begin to allocate fixed addr."); rt_ret = rtMalloc(&fixed_addrs_, total_fixed_addr_size_, RT_MEMORY_HBM); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%u, ret: 0x%X when DavinciModel %s", + total_fixed_addr_size_, rt_ret, __FUNCTION__); GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -3213,6 +3364,9 @@ bool DavinciModel::CheckInputAndModelSize(const int64_t &input_size, const int64 } // The input and model input size can not be exactly equal because user input is not definite. if ((input_size + kDataMemAlignSizeCompare) < op_size) { + REPORT_INNER_ERROR("E19999", "input size:%ld from user add align:%u < input_op_size:%ld in model, model_id:%u, " + "check invalid when DavinciModel %s", + input_size, kDataMemAlignSizeCompare, op_size, model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input size [%ld] can not be smaller than op size [%ld] after 64-byte alignment", input_size, op_size); return false; @@ -3262,6 +3416,9 @@ Status DavinciModel::CopyModelData(const InputData &input_data, OutputData &outp Status DavinciModel::UpdateIoTaskArgs(const std::map &data_info, bool is_input, const vector &blobs, bool is_dynamic, const string &batch_label) { if (blobs.size() != data_info.size()) { + REPORT_INNER_ERROR("E19999", "%s blob size:%zu from user != op_size:%zu in model, model_id:%u, " + "check invalid when DavinciModel %s", input_or_output.c_str(), + blobs.size(), data_info.size(), model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Verify %s data num failed: model requires %zu, but user actually feeds %zu", is_input ? "input" : "output", data_info.size(), blobs.size()); return ACL_ERROR_GE_PARAM_INVALID; } @@ -3269,6 +3426,9 @@ Status DavinciModel::UpdateIoTaskArgs(const std::map & for (const auto &data : data_info) { if (data.first >= blobs.size()) { // check data index.
+ REPORT_INNER_ERROR("E19999", "%s data index:%u from model >= blobs.size:%zu from user, mode_id:%u" + "check invalid when DavinciModel %s", input_or_output.c_str(), + data.first, blobs.size(), model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Verify %s data num failed: can not find No.%u data, because user only feeds %zu", is_input ? "input" : "output", data.first, blobs.size()); @@ -3277,6 +3437,9 @@ Status DavinciModel::UpdateIoTaskArgs(const std::map & const DataBuffer &buffer = blobs[data.first]; // index of data. if (buffer.data == nullptr) { + REPORT_INNER_ERROR("E19999", "%s buffer from user is nullptr, index:%u, mode_id:%u" + "check invalid when DavinciModel %s", input_or_output.c_str(), + data.first, model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_PARAM_INVALID, "data_buf.data is nullptr, index=%u", data.first); return ACL_ERROR_GE_PARAM_INVALID; } @@ -3294,6 +3457,8 @@ Status DavinciModel::UpdateIoTaskArgs(const std::map & GELOGI("[IMAS] Find addr %p need direct copy from user malloc input %p", basic_addr, buffer.data); rtError_t rt_ret = rtMemcpy(basic_addr, data_size, buffer.data, buffer.length, RT_MEMCPY_DEVICE_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%lu, model_id:%u, when DavinciModel %s", + data_size, model_id_, __FUNCTION__); GELOGE(rt_ret, "Non-zero copy data node copy failed"); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -3375,11 +3540,18 @@ Status DavinciModel::InitConstant(const OpDescPtr &op_desc) { auto v_output_size = ModelUtils::GetOutputSize(op_desc); auto v_output_addr = ModelUtils::GetOutputDataAddrs(runtime_param_, op_desc); GE_IF_BOOL_EXEC(v_weights.empty() || v_output_size.empty() || v_output_addr.empty(), + REPORT_INNER_ERROR("E19999", "weight.size:%zu output_length.size:%zu output_addr.size:%zu in " + "op:%s(%s) has empty, model_id:%u, check invalid when DavinciModel %s", + v_weights.size(),v_output_size.size(), v_output_addr.size(), + op_desc->GetName().c_str(), op_desc->GetType().c_str() ,model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "const op:%s not set output", op_desc->GetName().c_str()); return PARAM_INVALID;); GeTensor *tensor = const_cast(v_weights[0].get()); GE_IF_BOOL_EXEC(static_cast(v_output_size[0]) < tensor->GetData().size(), + REPORT_INNER_ERROR("E19999", "Output size:%zu < weight size:%zu in op:%s(%s) model_id:%u, " + "check invalid when DavinciModel %s", v_output_size[0], tensor->GetData().size(), + op_desc->GetName().c_str(), op_desc->GetType().c_str() ,model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "output size:%ld less than weight data size:%zu", v_output_size[0], tensor->GetData().size()); return PARAM_INVALID;); @@ -3429,6 +3601,8 @@ Status DavinciModel::InitTbeHandle(const OpDescPtr &op_desc) { auto kernel = ge_model_->GetTBEKernelStore().FindKernel(op_desc->GetName()); auto tbe_kernel = (kernel != nullptr) ? 
kernel : op_desc->TryGetExtAttr(OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr()); if (tbe_kernel == nullptr) { + REPORT_INNER_ERROR("E19999", "Get tbe_kernel for op:%s(%s) fail, model_id:%u, when DavinciModel %s", + op_desc->GetName().c_str(), op_desc->GetType().c_str() ,model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "TBE: %s can't find tvm bin file!", op_desc->GetName().c_str()); return INTERNAL_ERROR; } @@ -3455,6 +3629,9 @@ Status DavinciModel::InitTbeHandle(const OpDescPtr &op_desc) { } else if (json_string == "RT_DEV_BINARY_MAGIC_ELF_AIVEC") { binary.magic = RT_DEV_BINARY_MAGIC_ELF_AIVEC; } else { + REPORT_INNER_ERROR("E19999", "Attr:%s value:%s in op:%s(%s), model_id:%u, check invalid when DavinciModel %s", + TVM_ATTR_NAME_MAGIC.c_str(), json_string.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str() ,model_id_, __FUNCTION__); GELOGE(PARAM_INVALID, "TBE: Invalid parameter magic number! json: %s", json_string.c_str()); return PARAM_INVALID; } @@ -3544,6 +3721,11 @@ Status DavinciModel::InitStreamSwitch(const OpDescPtr &op_desc) { GE_LOGI_IF(!ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, active_stream_list), "GetInt ACTIVE_STREAM_LIST failed."); if (active_stream_list.size() != kTrueBranchStreamNum) { + REPORT_INNER_ERROR("E19999", "Attr:%s active_stream_list.size:%zu in op:%s(%s) != kTrueBranchStreamNum:%u, " + "model_id:%u, check invalid when DavinciModel %s", + ATTR_NAME_ACTIVE_STREAM_LIST.c_str(), active_stream_list.size(), + op_desc->GetName().c_str(), op_desc->GetType().c_str(), + kTrueBranchStreamNum, model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "Stream num of switch true branch must be %u.", kTrueBranchStreamNum); return INTERNAL_ERROR; } @@ -3558,6 +3740,9 @@ Status DavinciModel::InitStreamSwitch(const OpDescPtr &op_desc) { Status DavinciModel::InitStreamSwitchN(const OpDescPtr &op_desc) { std::vector active_stream_list; if (!AttrUtils::GetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, active_stream_list)) { + REPORT_INNER_ERROR("E19999", "Get Attr:%s from op:%s(%s) fail, model_id:%u, when DavinciModel %s", + ATTR_NAME_ACTIVE_STREAM_LIST.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "StreamSwitchNOp get attr ACTIVE_STREAM failed."); return INTERNAL_ERROR; } @@ -3569,6 +3754,9 @@ Status DavinciModel::InitStreamSwitchN(const OpDescPtr &op_desc) { uint32_t batch_num = 0; if (!AttrUtils::GetInt(op_desc, ATTR_NAME_BATCH_NUM, batch_num)) { + REPORT_INNER_ERROR("E19999", "Get Attr:%s from op:%s(%s) fail, model_id:%u, when DavinciModel %s", + ATTR_NAME_BATCH_NUM.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_, __FUNCTION__); GELOGE(FAILED, "Failed to get attr ATTR_NAME_BATCH_NUM, StreamSwitchN: %s.", op_desc->GetName().c_str()); return FAILED; } @@ -3586,6 +3774,9 @@ Status DavinciModel::SetDynamicBatchInfo(const OpDescPtr &op_desc, uint32_t batc std::vector batch_shape; const std::string attr_name = ATTR_NAME_PRED_VALUE + "_" + std::to_string(i); if (!AttrUtils::GetListInt(op_desc, attr_name, batch_shape)) { + REPORT_INNER_ERROR("E19999", "Get Attr:%s from op:%s(%s) fail, model_id:%u, when DavinciModel %s", + attr_name.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_, __FUNCTION__); GELOGE(FAILED, "Get attr ATTR_NAME_PRED_VALUE failed, Node: %s", op_desc->GetName().c_str()); batch_info_.clear(); return FAILED; @@ -3714,6 +3905,8 @@ Status DavinciModel::NnExecute(rtStream_t stream, bool async_mode, const InputDa // Add active 
entry stream for special env. Status DavinciModel::AddHeadStream() { if (active_stream_list_.empty()) { + REPORT_INNER_ERROR("E19999", "active_stream_list is empty in model:%u, check invalid when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(INTERNAL_ERROR, "Active stream is empty, stream list size: %zu, stream indication size: %zu.", stream_list_.size(), active_stream_indication_.size()); return INTERNAL_ERROR; } @@ -3733,6 +3926,8 @@ Status DavinciModel::AddHeadStream() { for (auto s : active_stream_list_) { std::shared_ptr active_entry = MakeShared(rt_head_stream_); if (active_entry == nullptr) { + REPORT_INNER_ERROR("E19999", "New CpuTaskActiveEntry failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(MEMALLOC_FAILED, "Make CpuTaskActiveEntry task failed."); return MEMALLOC_FAILED; } @@ -3864,6 +4059,8 @@ Status DavinciModel::TransAllVarData(ComputeGraphPtr &graph, uint32_t graph_id) rtContext_t ctx = nullptr; rtError_t rt_ret = rtCtxGetCurrent(&ctx); if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtCtxGetCurrent failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(RT_FAILED, "Failed to get current context, error_code is: 0x%X.", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -3894,6 +4091,7 @@ void DavinciModel::SetDataDumperArgs(const ComputeGraphPtr &graph, const map infos = ge::StringUtils::Split(input, ':'); if (infos.size() != kAippInfoNum) { + REPORT_INNER_ERROR("E19999", "Attr:%s in op:%s(%s), aipp input size:%zu != kAippInfoNum:%u, model_id:%u, " + "check invalid when DavinciModel %s", ATTR_NAME_AIPP_INPUTS.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str(), infos.size(), kAippInfoNum, + model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID, "origin input str is invalid[%zu, %u].", infos.size(), kAippInfoNum); return ACL_ERROR_GE_AIPP_MODE_INVALID; } @@ -3976,6 +4178,8 @@ Status DavinciModel::InitOrigInputInfo(uint32_t index, const OpDescPtr &op_desc) Status DavinciModel::GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info) const { const auto it = orig_input_info_.find(index); if (it == orig_input_info_.end()) { + REPORT_INNER_ERROR("E19999", "Get index:%u from orig_input_info_ fail, model_id:%u, when DavinciModel %s", + index, model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "There is not AIPP related with index %u.", index); return ACL_ERROR_GE_AIPP_NOT_EXIST; } @@ -3992,6 +4196,9 @@ void DavinciModel::ParseAIPPInfo(std::string in_out_info, InputOutputDims &dims_ GELOGI("ParseAIPPInfo: origin str: %s", in_out_info.c_str()); std::vector infos = ge::StringUtils::Split(in_out_info, ':'); if (infos.size() != kAippInfoNum) { + REPORT_INNER_ERROR("E19999", "in_out_info:%s size:%zu != kAippInfoNum:%u, model_id:%u, " + "check invalid when DavinciModel %s", in_out_info.c_str(), infos.size(), kAippInfoNum, + model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID, "origin input str is invalid[%zu, %u].", infos.size(), kAippInfoNum); return; } @@ -4054,6 +4261,8 @@ Status DavinciModel::GetAllAippInputOutputDims(uint32_t index, vector &output_dims) const { const auto it = aipp_dims_info_.find(index); if (it == aipp_dims_info_.end()) { + REPORT_INNER_ERROR("E19999", "Get index:%u from aipp_dims_info_ fail, model_id:%u, when DavinciModel %s", + index, model_id_, __FUNCTION__); GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "There is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST; } @@ -4085,6 +4298,8 @@ Status DavinciModel::InitL1DataDumperArgs() { if (rtDumpAddrSet(rt_model_handle_, l1_fusion_addr_, kDumpL1FusionOpMByteSize, kDumpFlagOfL1Fusion) != RT_ERROR_NONE) { // l1_fusion_addr_ will be free when DavinciModel destruct + REPORT_CALL_ERROR("E19999", "Call rtDumpAddrSet failed, model_id:%u, when DavinciModel %s", + model_id_, __FUNCTION__); GELOGE(FAILED, "Call rtDumpAddrSet failed"); return FAILED; } diff --git a/ge/graph/manager/graph_caching_allocator.cc b/ge/graph/manager/graph_caching_allocator.cc index cc8bd90d..e87a2a0f 100644 --- a/ge/graph/manager/graph_caching_allocator.cc +++ b/ge/graph/manager/graph_caching_allocator.cc @@ -111,6 +111,7 @@ Status CachingAllocator::Initialize(uint32_t device_id) { } auto bin_ptr = new (std::nothrow) BlockBin(BlockComparator); if (bin_ptr == nullptr) { + REPORT_CALL_ERROR("E19999", "New BlockBin fail, device_id:%u, when CachingAllocator %s", device_id, __FUNCTION__); GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc BlockBin failed."); return ACL_ERROR_GE_MEMORY_ALLOCATION; } @@ -146,6 +147,8 @@ uint8_t *CachingAllocator::Malloc(size_t size, uint8_t *org_ptr, uint32_t device ptr = block->ptr; } if (ptr == nullptr) { + REPORT_INNER_ERROR("E19999", "FindFreeBlock fail, size:%zu, device_id:%u, when CachingAllocator %s", + size, device_id, __FUNCTION__); GELOGE(FAILED, "Malloc failed device id = %u, size= %zu", device_id, size); } return ptr; @@ -154,6 +157,8 @@ Status CachingAllocator::Free(uint8_t *ptr, uint32_t device_id) { GELOGI("Free device id = %u", device_id); if (ptr == nullptr) { + REPORT_INNER_ERROR("E19999", "Param ptr is nullptr, device_id:%u, check invalid when CachingAllocator %s", + device_id, __FUNCTION__); GELOGE(PARAM_INVALID, "Invalid memory pointer"); return ge::PARAM_INVALID; } @@ -161,6 +166,8 @@ Status CachingAllocator::Free(uint8_t *ptr, uint32_t device_id) { std::lock_guard lock(mutex_); auto it = allocated_blocks_.find(ptr); if (it == allocated_blocks_.end()) { + REPORT_INNER_ERROR("E19999", "Param ptr not allocated before, device_id:%u, check invalid when CachingAllocator %s", + device_id, __FUNCTION__); GELOGE(PARAM_INVALID, "Invalid memory pointer"); return ge::PARAM_INVALID; } @@ -218,6 +225,8 @@ Block *CachingAllocator::FindFreeBlock(size_t size, uint8_t *org_ptr, uint32_t d Block key(device_id, size, org_ptr); BlockBin *bin = GetBlockBin(size); if (bin == nullptr) { + REPORT_INNER_ERROR("E19999", "GetBlockBin fail, size:%zu, device_id:%u, when CachingAllocator %s", + size, device_id, __FUNCTION__); GELOGE(ge::FAILED, "Get block bin failed size = %zu", size); return nullptr; } @@ -249,6 +258,8 @@ Block *CachingAllocator::SplitBlock(Block *block, size_t size, BlockBin &bin, ui Block *remaining = block; Block *new_block = new (std::nothrow) Block(device_id, size, &bin, block->ptr); if (new_block == nullptr) { + REPORT_CALL_ERROR("E19999", "New Block fail, size:%zu, device_id:%u, when CachingAllocator %s", + size, device_id, __FUNCTION__); GELOGE(ge::FAILED, "Alloc block failed size = %zu", size); return block; } @@ -293,11 +304,15 @@ Status CachingAllocator::TryExtendCache(size_t size, uint32_t device_id) { Status CachingAllocator::AddToBlockBin(uint8_t *ptr, size_t size, uint32_t device_id) { BlockBin *bin = GetBlockBin(size); if (bin == nullptr) { + REPORT_INNER_ERROR("E19999", "GetBlockBin fail, size:%zu, 
device_id:%u, when CachingAllocator %s", + size, device_id, __FUNCTION__); GELOGE(ge::FAILED, "Get block bin failed size = %zu", size); return ge::FAILED; } Block *block = new (std::nothrow) Block(device_id, size, bin, nullptr); if (block == nullptr) { + REPORT_CALL_ERROR("E19999", "New Block fail, size:%zu, device_id:%u, when CachingAllocator %s", + size, device_id, __FUNCTION__); GELOGE(ge::FAILED, "Alloc block failed size = %zu", size); return ge::FAILED; }