@@ -38,9 +38,11 @@ AllocationAttr::AllocationAttr(void *try_reuse_addr) : AllocationAttr(0, try_reu
NpuMemoryAllocator *NpuMemoryAllocator::GetAllocator() {
  int32_t device_id = 0;
  auto rt_result = rtGetDevice(&device_id);
  if (rt_result != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "[Get][Device] Failed, result:%d.", rt_result);
    REPORT_INNER_ERROR("E19999", "rtGetDevice failed when NpuMemoryAllocator %s, result:%d.",
                       __FUNCTION__, rt_result);
    return nullptr;
  }
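Almost every hunk in this patch applies one logging convention: each failure path pairs a developer-facing GELOGE with a user-facing REPORT_INNER_ERROR / REPORT_CALL_ERROR that carries __FUNCTION__ and the relevant IDs (device, model, node). A minimal standalone sketch of that pattern, with plain fprintf standing in for GE's logging backend (LOG_ERROR, REPORT_ERROR, and AllocateChecked are illustrative names, not GE's API):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-ins for GELOGE / REPORT_INNER_ERROR, for illustration only.
#define LOG_ERROR(fmt, ...) std::fprintf(stderr, "[ERROR] " fmt "\n", ##__VA_ARGS__)
#define REPORT_ERROR(code, fmt, ...) std::fprintf(stderr, "[%s] " fmt "\n", code, ##__VA_ARGS__)

static void *AllocateChecked(uint32_t device_id, std::size_t size) {
  if (size == 0) {
    // Log for developers first, then file the report that surfaces to users,
    // tagging the reporting function so the message can be traced to its origin.
    LOG_ERROR("Memory size is 0, device_id = %u, size = %zu.", device_id, size);
    REPORT_ERROR("E19999", "Memory size is 0, device_id = %u, size = %zu when %s.",
                 device_id, size, __FUNCTION__);
    return nullptr;
  }
  return std::malloc(size);
}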
@@ -58,9 +60,9 @@ void *NpuMemoryAllocator::Allocate(std::size_t size, AllocationAttr *attr) {
  }
  if (allocate_size == 0) {
    GELOGE(MEMALLOC_FAILED, "[Check][Param:size_t]Memory size is 0, device_id = %u, size = %zu.",
           device_id_, allocate_size);
    REPORT_INNER_ERROR("E19999", "Memory size is 0, device_id = %u, size = %zu when %s.",
                       device_id_, allocate_size, __FUNCTION__);
    return nullptr;
  }
@@ -72,9 +74,9 @@ void *NpuMemoryAllocator::Allocate(std::size_t size, AllocationAttr *attr) {
    buffer = MemManager::Instance().HostMemInstance(RT_MEMORY_HBM).Malloc(allocate_size);
  } else {
    if (allocate_size > kMaxHbmMemorySize) {
      GELOGE(PARAM_INVALID, "[Check][Param:size_t]Invalid HBM memory size: %zu bigger than limit:%lu, check invalid.",
             allocate_size, kMaxHbmMemorySize);
      REPORT_CALL_ERROR("E19999", "Invalid HBM memory size: %zu bigger than limit:%lu, check invalid when %s.",
                        allocate_size, kMaxHbmMemorySize, __FUNCTION__);
      return nullptr;
    }
@@ -94,9 +96,9 @@ void *NpuMemoryAllocator::Allocate(std::size_t size, AllocationAttr *attr) {
        .Malloc(allocate_size, reinterpret_cast<uint8_t *>(try_reuse_addr), device_id_);
  }
  if (buffer == nullptr) {
    GELOGE(MEMALLOC_FAILED, "[Malloc][Memory] Failed, device_id = %u, size = %zu.",
           device_id_, allocate_size);
    REPORT_CALL_ERROR("E19999", "malloc memory failed, device_id = %u, size = %zu when %s.",
                      device_id_, allocate_size, __FUNCTION__);
    return nullptr;
  }
@@ -59,9 +59,8 @@ Status GraphExecutionContext::Synchronize(rtStream_t rt_stream) {
    return SUCCESS;
  }
  GELOGE(RT_FAILED, "[Invoke][rtStreamSynchronize] failed, ret = %d", rt_ret);
  REPORT_CALL_ERROR("E19999",
                    "invoke rtStreamSynchronize failed when GraphExecutionContext %s, ret = %d", __FUNCTION__, rt_ret);
  return RT_FAILED;
}
@@ -51,10 +51,10 @@ void HybridModelAsyncExecutor::SetModelName(const string &model_name) {
}
Status HybridModelAsyncExecutor::EnqueueData(const shared_ptr<InputDataWrapper> &data) {
  if (data_inputer_->Push(data) != SUCCESS) {
    REPORT_CALL_ERROR("E19999", "Data queue is full, please call again later when %s, model_id %u.",
                      __FUNCTION__, model_id_);
    GELOGE(domi::DATA_QUEUE_ISFULL,
           "[Push][Data] Data queue is full, please call again later, model_id %u ", model_id_);
    return domi::DATA_QUEUE_ISFULL;
  }
@@ -65,9 +65,9 @@ Status HybridModelAsyncExecutor::EnqueueData(const shared_ptr<InputDataWrapper>
Status HybridModelAsyncExecutor::Start(const std::shared_ptr<ModelListener> &listener) {
  GELOGD("HybridModelExecutor::Start IN, has listener = %d", listener != nullptr);
  std::lock_guard<std::mutex> lk(mu_);
  if (run_flag_) {
    REPORT_INNER_ERROR("E19999",
                       "Model already started when HybridModelAsyncExecutor %s, model_id:%u.", __FUNCTION__, model_id_);
    GELOGE(INTERNAL_ERROR, "[Check][RunState] Model already started, model_id:%u.", model_id_);
    return INTERNAL_ERROR;
  }
@@ -80,8 +80,8 @@ Status HybridModelAsyncExecutor::Start(const std::shared_ptr<ModelListener> &lis
    return RunInternal();
  });
  GE_CHK_BOOL_RET_STATUS(future_.valid(), INTERNAL_ERROR,
                         "[Check][RunState] Failed to start, model_id:%u.", model_id_);
  GELOGD("HybridModelExecutor::Start successfully");
  return SUCCESS;
}
@@ -115,8 +115,8 @@ Status HybridModelAsyncExecutor::Init() {
  executor_ = std::unique_ptr<HybridModelExecutor>(new(std::nothrow) HybridModelExecutor(model_, device_id_, stream_));
  GE_CHECK_NOTNULL(executor_);
  GE_CHK_STATUS_RET(executor_->Init(),
                    "[Init][HybridModelExecutor] failed, model_id:%u.", model_id_);
  GE_CHK_STATUS_RET(DumpOpDebug(), "[Dump][OpDebug] failed, model_id:%u.", model_id_);
  GELOGI("HybridModel stage nums:%zu", model_->GetRootGraphItem()->NumGroups());
@@ -124,8 +124,8 @@ Status HybridModelAsyncExecutor::Init() {
    pipe_executor_ =
        std::unique_ptr<HybridModelPipelineExecutor>(new(std::nothrow) HybridModelPipelineExecutor(model_, device_id_));
    GE_CHECK_NOTNULL(pipe_executor_);
    GE_CHK_STATUS_RET(pipe_executor_->Init(),
                      "[Init][HybridModelPipelineExecutor] failed, model_id:%u.", model_id_);
  }
  GE_CHK_STATUS_RET(InitInputDesc(), "[Init][InputDesc] failed, model_id:%u.", model_id_);
@@ -136,8 +136,8 @@ Status HybridModelAsyncExecutor::Init() {
Status HybridModelAsyncExecutor::PreRun(InputData &current_data, HybridModelExecutor::ExecuteArgs &args) {
  GE_CHK_STATUS_RET(SyncVarData(), "[Invoke][SyncVarData] failed, model_id:%u.", model_id_);
  RECORD_MODEL_EXECUTION_EVENT(executor_->GetContext(), "[SyncVarData] End");
  GE_CHK_STATUS_RET(PrepareInputs(current_data, args),
                    "[Invoke][PrepareInputs] failed to copy input data to model, model_id:%u.", model_id_);
  RECORD_MODEL_EXECUTION_EVENT(executor_->GetContext(), "[CopyInputData] End");
  return SUCCESS;
}
@@ -213,7 +213,7 @@ Status HybridModelAsyncExecutor::HandleResult(Status exec_ret,
  if (exec_ret != SUCCESS) {
    GELOGE(exec_ret, "[Check][Param:Status] failed to execute graph. model_id = %u", model_id_);
    REPORT_INNER_ERROR("E19999",
                       "failed to execute graph when HybridModelAsyncExecutor %s. model_id = %u", __FUNCTION__, model_id_);
    return OnComputeDone(data_id, INTERNAL_ERROR, output_tensor_info_list);
  }
@@ -250,10 +250,10 @@ Status HybridModelAsyncExecutor::SyncVarData() {
Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, HybridModelExecutor::ExecuteArgs &args) {
  if (current_data.blobs.size() < input_tensor_desc_.size()) {
    GELOGE(PARAM_INVALID,
           "[Check][Size]Blob size mismatches, expect at least %zu, but got %zu, model_id = %u",
           input_tensor_desc_.size(), current_data.blobs.size(), model_id_);
    REPORT_INNER_ERROR("E19999",
                       "Blob size mismatches, expect at least %zu, but got %zu when HybridModelAsyncExecutor %s, model_id = %u.",
                       input_tensor_desc_.size(), current_data.blobs.size(), __FUNCTION__, model_id_);
    return PARAM_INVALID;
@@ -267,12 +267,12 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
    auto tensor_size = input_sizes_[input_index];
    if (is_input_dynamic_[input_index]) {
      if (input_index >= current_data.shapes.size()) {
        GELOGE(PARAM_INVALID,
               "[Check][Range]Shape index out of range, index = %zu, shape size = %zu model_id = %u.",
               input_index, current_data.shapes.size(), model_id_);
        REPORT_INNER_ERROR("E19999",
                           "Shape index out of range, index = %zu, shape size = %zu when HybridModelAsyncExecutor %s, model_id = %u.",
                           input_index, current_data.shapes.size(), __FUNCTION__, model_id_);
        return PARAM_INVALID;
      }
      auto &tensor_desc = input_tensor_desc_[input_index];
@@ -287,12 +287,12 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
        }
        // range[k].second can be -1
        if (shape.GetDim(k) < range[k].first || (range[k].second >= 0 && shape.GetDim(k) > range[k].second)) {
          GELOGE(PARAM_INVALID,
                 "[Check][Range]Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld], model_id = %u.",
                 input_index, k, shape.GetDim(k), range[k].first, range[k].second, model_id_);
          REPORT_INNER_ERROR("E19999",
                             "Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld], model_id = %u.",
                             input_index, k, shape.GetDim(k), range[k].first, range[k].second, model_id_);
          return PARAM_INVALID;
        }
      }
@@ -318,14 +318,14 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
    const DataBuffer &data_buf = blobs[input_index];
    auto mem_size = static_cast<uint64_t>(tensor_size);
    if (mem_size < data_buf.length) {
      REPORT_INNER_ERROR("E19999",
                         "input data size(%lu) does not match model required size(%lu) when %s, ret failed, model_id = %u.",
                         data_buf.length, mem_size, __FUNCTION__, model_id_);
      GELOGE(PARAM_INVALID,
             "[Check][Size]input data size(%lu) does not match model required size(%lu), ret failed, model_id = %u.",
             data_buf.length, mem_size, model_id_);
      return PARAM_INVALID;
    }
    if (data_buf.length > 0) {
      GELOGI("[IMAS]CopyPlainData memcpy graph_%u type[F] output[%zu] memaddr[%p] mem_size[%zu] datasize[%lu]",
@@ -433,7 +433,7 @@ Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &a
      GELOGE(INTERNAL_ERROR,
             "[Check][Size]output[%zu] tensor size(%zu) is not enough for output shape [%s], model_id = %u.",
             i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str(), model_id_);
      REPORT_INNER_ERROR("E19999",
                         "output[%zu] tensor size(%zu) is not enough for output shape [%s] model_id = %u,"
                         " when HybridModelAsyncExecutor %s.",
                         i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str(), model_id_, __FUNCTION__);
@@ -513,14 +513,14 @@ Status HybridModelAsyncExecutor::Execute(const vector<GeTensor> &inputs, vector<
  }
  HybridModelExecutor::ExecuteArgs args;
  GE_CHK_STATUS_RET(PrepareInputs(input_data, args),
                    "[Invoke][PrepareInputs]Failed to copy input data to model, model_id = %u", model_id_);
  GELOGD("Done copying input data successfully.");
  GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed, model_id = %u.", model_id_);
  std::vector<ge::OutputTensorInfo> output_tensor_info_list;
  OutputData output_data;
  GE_CHK_STATUS_RET(CopyOutputs(args, &output_data, output_tensor_info_list),
                    "[Invoke][CopyOutputs]Failed to copy outputs, model_id = %u.", model_id_);
  GELOGD("Done copying output data successfully. output count = %zu", output_tensor_info_list.size());
@@ -572,7 +572,7 @@ Status HybridModelAsyncExecutor::DumpOpDebug() {
      loop_cond = const_cast<void *>(varible_loop_cond->GetData());
    }
    data_dumper_.SetLoopAddr(global_step, loop_per_iter, loop_cond);
    GE_CHK_STATUS_RET(data_dumper_.LoadDumpInfo(),
                      "[Invoke][LoadDumpInfo] failed in hybrid engine, model_id = %u.", model_id_);
    GELOGD("Dump op debug SUCCESS in hybrid engine");
  }
@@ -59,9 +59,9 @@ Status StageExecutor::Start(const std::vector<TensorValue> &inputs, const std::v
    task_queue_.Pop(task_info);
    GELOGD("[Executor: %d] Got task, stage = %d, iteration = %ld", id_, task_info.stage, task_info.iteration);
    if (task_info.iteration >= pipe_config_->iteration_end) {
      GELOGE(INTERNAL_ERROR, "[Check][Range][Executor: %d] Unexpected iteration: %ld.",
             id_, task_info.iteration);
      REPORT_INNER_ERROR("E19999", "[Executor: %d] Unexpected iteration: %ld when StageExecutor %s.",
                         id_, task_info.iteration, __FUNCTION__);
      return INTERNAL_ERROR;
    }
@@ -69,16 +69,16 @@ Status StageExecutor::Start(const std::vector<TensorValue> &inputs, const std::v
    if (task_info.event != nullptr) {
      GELOGD("[%d] Add StreamWaitEvent", id_);
      GE_CHK_RT_RET(rtStreamWaitEvent(stream_, task_info.event));
      RECORD_MODEL_EXECUTION_EVENT(&context_, "[iteration = %ld] [Stage = %d] End", task_info.iteration - 1,
                                   task_info.stage);
    }
    RECORD_MODEL_EXECUTION_EVENT(&context_, "[iteration = %ld] [Stage = %d] Start", task_info.iteration,
                                 task_info.stage);
    if (task_info.stage == 0) {
      GELOGD("[Executor: %d] To ResetExecutionContext", id_);
      GE_CHK_STATUS_RET(ResetExecutionContext(context_),
                        "[Invoke][ResetExecutionContext][Executor: %d] Failed to reset context", id_);
      context_.iteration = task_info.iteration;
      GE_CHK_STATUS_RET_NOLOG(SetInputs(inputs, input_desc));
@@ -96,10 +96,10 @@ Status StageExecutor::Start(const std::vector<TensorValue> &inputs, const std::v
    auto sync_result = Synchronize();
    if (sync_result != SUCCESS) {
      GELOGE(sync_result,
             "[Invoke][Synchronize][Executor: %d] Failed to sync result:%d. iteration = %ld",
             id_, sync_result, task_info.iteration);
      REPORT_CALL_ERROR("E19999", "[Executor: %d] Failed to sync result:%d when StageExecutor %s. iteration = %ld",
                        id_, sync_result, __FUNCTION__, task_info.iteration);
      context_.profiler->Dump(std::cout);
      context_.callback_manager->Destroy();
@@ -107,11 +107,11 @@ Status StageExecutor::Start(const std::vector<TensorValue> &inputs, const std::v
      return sync_result;
    }
    RECORD_MODEL_EXECUTION_EVENT(&context_, "[iteration = %ld] [Stage = %d] End", task_info.iteration, task_info.stage);
    // if end stage
    if (task_info.stage >= pipe_config_->num_stages - 1) {
      RECORD_MODEL_EXECUTION_EVENT(&context_, "[iteration = %ld] Schedule End", task_info.iteration);
      GELOGD("[Executor: %d] End of iteration [%ld]", id_, task_info.iteration);
      context_.callback_manager->Destroy();
      RuntimeInferenceContext::DestroyContext(std::to_string(context_.context_id));
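Several of the StageExecutor hunks above only correct format specifiers: task_info.iteration is an int64_t, so %d truncates it on LP64 targets and the patch switches to %ld. A portable alternative uses the <cinttypes> macros; a small standalone sketch:

#include <cinttypes>
#include <cstdio>

int main() {
  int64_t iteration = 1234567890123LL;
  // %ld happens to match int64_t on LP64 Linux (GE's target); PRId64 is correct everywhere.
  std::printf("[iteration = %" PRId64 "] [Stage = %d] End\n", iteration, 0);
  return 0;
}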
@@ -261,7 +261,7 @@ Status HybridModelPipelineExecutor::Execute(HybridModelExecutor::ExecuteArgs &ar
    if (ret != SUCCESS) {
      GELOGE(ret, "[Invoke][Synchronize] failed for [Executor: %zu].", i);
      REPORT_CALL_ERROR("E19999", "[Executor: %zu] failed to Synchronize result when HybridModelPipelineExecutor %s.",
                        i, __FUNCTION__);
      has_error = true;
      continue;
    }
@@ -49,9 +49,9 @@ void HybridProfiler::RecordEvent(EventType event_type, const char *fmt, ...) {
  va_end(args);
  auto index = counter_++;
  if (index >= static_cast<int>(events_.size())) {
    GELOGE(INTERNAL_ERROR,
           "[Check][Range]index out of range. index = %d, max event size = %zu", index, events_.size());
    REPORT_INNER_ERROR("E19999", "index out of range when HybridProfiler %s. index = %d, max event size = %zu",
                       __FUNCTION__, index, events_.size());
    return;
  }
@@ -67,10 +67,10 @@ Status ShapeInferenceState::UpdateInputShape(int idx, const GeTensorDesc &target
    Format format = input_desc.GetFormat();
    DataType data_type = input_desc.GetDataType();
    if (TensorUtils::CalcTensorMemSize(shape, format, data_type, tensor_size) != GRAPH_SUCCESS) {
      GELOGE(FAILED, "[Invoke][CalcTensorMemSize] failed for [%s].",
             node_item.NodeName().c_str());
      REPORT_CALL_ERROR("E19999", "CalcTensorMemSize failed for [%s] when ShapeInferenceState %s.",
                        node_item.NodeName().c_str(), __FUNCTION__);
      return FAILED;
    }
  }
@@ -124,18 +124,18 @@ Status ShapeInferenceState::AwaitShapesReady(const GraphExecutionContext &contex
    }
    if (context.GetStatus() != SUCCESS) {
      GELOGE(FAILED, "[Check][Status][%s] Await pending shape cancelled.",
             node_item.NodeName().c_str());
      REPORT_CALL_ERROR("E19999", "[%s] Await pending shape cancelled when %s.",
                        node_item.NodeName().c_str(), __FUNCTION__);
      break;
    }
  }
  if (!wait_success) {
    GELOGE(FAILED, "[Check][Status][%s] Wait for shape timeout:%d.",
           node_item.NodeName().c_str(), kWaitInternal);
    REPORT_CALL_ERROR("E19999", "[%s] Wait for shape timeout:%d when %s.",
                      node_item.NodeName().c_str(), kWaitInternal, __FUNCTION__);
    return FAILED;
  }
@@ -76,7 +76,7 @@ Status CallbackManager::CallbackProcess(rtContext_t context) {
    auto rt_err = rtEventSynchronize(event);
    if (rt_err != RT_ERROR_NONE) {
      GELOGE(RT_FAILED, "[Invoke][rtEventSynchronize] failed. ret = %d", rt_err);
      REPORT_CALL_ERROR("E19999",
                        "Invoke rtEventSynchronize failed when CallbackManager %s, ret = %d.", __FUNCTION__, rt_err);
      GE_CHK_RT(rtEventDestroy(event));
      return RT_FAILED;
@@ -50,10 +50,10 @@ NodeStatePtr SubgraphContext::GetOrCreateNodeState(const NodeItem *node_item) {
Status SubgraphContext::SetInput(int index, const TensorValue &tensor) {
  if (static_cast<size_t>(index) >= all_inputs_.size()) {
    GELOGE(INTERNAL_ERROR,
           "[Check][Param:index]input index out of range. all input num = %zu, input index = %d",
           all_inputs_.size(), index);
    REPORT_INNER_ERROR("E19999",
                       "input param index out of range when SubgraphContext %s, all input num = %zu, input index = %d.",
                       __FUNCTION__, all_inputs_.size(), index);
    return INTERNAL_ERROR;
  }
@@ -70,9 +70,9 @@ Status SubgraphContext::SetOutput(const NodeItem &node_item, int output_index, c
  auto index = node_item.output_start + output_index;
  if ((output_index >= node_item.num_outputs) || (static_cast<size_t>(index) >= all_outputs_.size())) {
    GELOGE(INTERNAL_ERROR,
           "[Check][Param:output_index]output index out of range. all output num = %zu, node_item = %s,"
           "output index = %d.", all_outputs_.size(), node_item.DebugString().c_str(), output_index);
    REPORT_INNER_ERROR("E19999", "output index out of range when SubgraphContext %s. "
                       "all output num = %zu, node_item = %s, output index = %d.",
                       __FUNCTION__, all_outputs_.size(), node_item.DebugString().c_str(), output_index);
    return INTERNAL_ERROR;
@@ -129,9 +129,10 @@ Status SubgraphContext::Await(const NodePtr &node) {
void SubgraphContext::OnError(Status error) {
  if (error != END_OF_SEQUENCE) {
    GELOGE(error, "[Check][Param:error][%s] Error:%d occurred while executing graph.",
           graph_item_->GetName().c_str(), error);
    REPORT_INNER_ERROR("E19999", "[%s] Error:%d occurred while executing graph when SubgraphContext %s.",
                       graph_item_->GetName().c_str(), error, __FUNCTION__);
  }
  node_done_manager_.Destroy();
}
@@ -44,7 +44,7 @@ Status SubgraphExecutor::Init(const std::vector<TensorValue> &inputs,
                              const std::vector<ConstGeTensorDescPtr> &input_desc) {
  subgraph_context_.reset(new(std::nothrow)SubgraphContext(graph_item_, context_));
  GE_CHECK_NOTNULL(subgraph_context_);
  GE_CHK_STATUS_RET(subgraph_context_->Init(),
                    "[Init][SubgraphContext][%s] Failed to init subgraph context.", graph_item_->GetName().c_str());
  shape_inference_engine_.reset(new(std::nothrow) ShapeInferenceEngine(context_, subgraph_context_.get()));
@@ -68,11 +68,12 @@ Status SubgraphExecutor::InitInputsForUnknownShape(const std::vector<TensorValue
  // Number of inputs of the parent node should be greater than or equal to that of the subgraph
  auto input_nodes = graph_item_->GetInputNodes();
  if (inputs.size() < input_nodes.size()) {
    GELOGE(INTERNAL_ERROR,
           "[Check][Size][%s] Number of inputs [%zu] is not sufficient for subgraph which needs [%zu] inputs.",
           graph_item_->GetName().c_str(), inputs.size(), input_nodes.size());
    REPORT_INNER_ERROR("E19999",
                       "[%s] Number of inputs [%zu] is not sufficient for subgraph which needs [%zu] inputs,"
                       " check invalid when SubgraphExecutor %s.",
                       graph_item_->GetName().c_str(), inputs.size(), input_nodes.size(), __FUNCTION__);
    return INTERNAL_ERROR;
  }
@@ -117,8 +118,9 @@ Status SubgraphExecutor::InitInputsForKnownShape(const std::vector<TensorValue>
    GELOGE(INTERNAL_ERROR,
           "[Check][Size][%s] Number of inputs [%zu] is not sufficient for subgraph which needs at least [%d] inputs",
           graph_item_->GetName().c_str(), inputs.size(), parent_input_index + 1);
    REPORT_INNER_ERROR("E19999",
                       "[%s] Number of inputs [%zu] is not sufficient for subgraph which needs at least [%d] inputs,"
                       " check invalid when %s.",
                       graph_item_->GetName().c_str(), inputs.size(), parent_input_index + 1, __FUNCTION__);
    return INTERNAL_ERROR;
  }
@@ -387,9 +389,10 @@ Status SubgraphExecutor::GetOutputs(vector<TensorValue> &outputs, std::vector<Co
                    "[Invoke][GetOutputDescList][%s] Failed to get output tensor desc.", graph_item_->GetName().c_str());
  if (outputs.size() != output_desc.size()) {
    GELOGE(INTERNAL_ERROR,
           "[Check][Size]Number of outputs(%zu) mismatches number of output_desc(%zu).",
           outputs.size(), output_desc.size());
    REPORT_INNER_ERROR("E19999", "Number of outputs(%zu) mismatches number of output_desc(%zu),"
                       " check invalid when SubgraphExecutor %s.",
                       outputs.size(), output_desc.size(), __FUNCTION__);
    return INTERNAL_ERROR;
  }
@@ -413,9 +416,11 @@ Status SubgraphExecutor::SetOutputsToParentNode(TaskContext &task_context) {
                    "[Invoke][GetOutputDescList][%s] Failed to get output tensor desc.", graph_item_->GetName().c_str());
  if (outputs.size() != output_desc_list.size()) {
    GELOGE(INTERNAL_ERROR, "[Check][Size][%s] num of output tensors = %zu, num of output tensor desc = %zu not equal",
           graph_item_->GetName().c_str(), outputs.size(), output_desc_list.size());
    REPORT_INNER_ERROR("E19999",
                       "[%s] num of output tensors = %zu, num of output tensor desc = %zu not equal,"
                       " check invalid when SubgraphExecutor %s",
                       graph_item_->GetName().c_str(), outputs.size(), output_desc_list.size(), __FUNCTION__);
    return INTERNAL_ERROR;
  }
@@ -105,7 +105,7 @@ Status NodeDoneCallback::PrepareConstInputs(const NodeItem &node_item) {
             "[Check][Size][%s] Tensor size is not enough. output index = %d, required size = %ld, tensor = %s.",
             node_item.NodeName().c_str(), output_idx, tensor_size,
             output_tensor->DebugString().c_str());
      REPORT_INNER_ERROR("E19999",
                         "[%s] Tensor size is not enough. output index = %d, required size = %ld, tensor = %s when %s.",
                         node_item.NodeName().c_str(), output_idx, tensor_size,
                         output_tensor->DebugString().c_str(), __FUNCTION__);
@@ -423,7 +423,7 @@ Status ExecutionEngine::ValidateInputTensors(const NodeState &node_state, const
             "[Check][Size] for [%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu.",
             task_context.GetNodeName(), i, expected_size, input_tensor->GetSize());
      REPORT_INNER_ERROR("E19999", "[%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu when %s.",
                         task_context.GetNodeName(), i, expected_size, input_tensor->GetSize(), __FUNCTION__);
      return INTERNAL_ERROR;
    }
  }
@@ -205,8 +205,8 @@ Status ShapeInferenceEngine::UpdatePeerNodeShape(const Node &node) {
    auto peer_input_desc = peer_op_desc->MutableInputDesc(peer_anchor->GetIdx());
    if (peer_input_desc == nullptr) {
      GELOGE(GRAPH_FAILED, "[Call][MutableInputDesc] for %s return nullptr.", peer_op_desc->GetName().c_str());
      REPORT_CALL_ERROR("E19999", "%s call MutableInputDesc return nullptr when ShapeInferenceEngine %s.",
                        peer_op_desc->GetName().c_str(), __FUNCTION__);
      continue;
    }
@@ -231,11 +231,11 @@ Status ShapeInferenceEngine::CanonicalizeShape(GeTensorDesc &tensor_desc,
  const auto &tensor_shape = tensor_desc.MutableShape();
  if (tensor_shape.IsUnknownShape()) {
    if (!fallback_with_range) {
      GELOGE(INTERNAL_ERROR,
             "[Is][UnknownShape] Output shape is still unknown after shape inference. shape = [%s].",
             tensor_shape.ToString().c_str());
      REPORT_INNER_ERROR("E19999", "Output shape is still unknown after shape inference. "
                         "shape = [%s] when ShapeInferenceEngine %s.", tensor_shape.ToString().c_str(), __FUNCTION__);
      return INTERNAL_ERROR;
    }
@@ -243,10 +243,10 @@ Status ShapeInferenceEngine::CanonicalizeShape(GeTensorDesc &tensor_desc,
    std::vector<std::pair<int64_t, int64_t>> shape_range;
    GE_CHK_GRAPH_STATUS_RET(tensor_desc.GetShapeRange(shape_range), "Failed to get shape range");
    if (shape_range.size() != shape.size()) {
      GELOGE(INTERNAL_ERROR, "[Check][Size] Number of shape ranges (%zu) mismatches that of dims (%zu).",
             shape_range.size(), shape.size());
      REPORT_INNER_ERROR("E19999", "Number of shape ranges (%zu) mismatches that of dims (%zu)"
                         " when ShapeInferenceEngine %s.", shape_range.size(), shape.size(), __FUNCTION__);
      return INTERNAL_ERROR;
    }
@@ -270,10 +270,10 @@ Status ShapeInferenceEngine::CalcTensorSize(DataType data_type,
  GELOGD("To calc tensor size by shape = [%s]", GeShape(shape).ToString().c_str());
  uint32_t type_size;
  if (!TypeUtils::GetDataTypeLength(data_type, type_size)) {
    GELOGE(INTERNAL_ERROR, "[Get][DataTypeLength] failed for type:%s.",
           TypeUtils::DataTypeToSerialString(data_type).c_str());
    REPORT_CALL_ERROR("E19999", "GetDataTypeLength failed for type:%s when ShapeInferenceEngine %s.",
                      TypeUtils::DataTypeToSerialString(data_type).c_str(), __FUNCTION__);
    return INTERNAL_ERROR;
  }
@@ -300,23 +300,24 @@ Status ShapeInferenceEngine::CalcOutputTensorSizes(const NodeItem &node_item, bo
    const auto &shape = tensor_desc->MutableShape();
    // modify on copy
    auto dims = shape.GetDims();
    auto status_result = CanonicalizeShape(*tensor_desc, dims, fallback_with_range);
    if (status_result != SUCCESS) {
      REPORT_CALL_ERROR("E19999",
                        "Invoke CanonicalizeShape failed when ShapeInferenceEngine %s, node:%s, output:%zu.",
                        __FUNCTION__, node_item.NodeName().c_str(), output_index);
      GELOGE(ge::FAILED, "[Canonicalize][Shape] failed for [%s], output %zu.",
             node_item.NodeName().c_str(), output_index);
      return status_result;
    }
    int64_t tensor_size;
    status_result = CalcTensorSize(tensor_desc->GetDataType(), dims, tensor_size);
    if (status_result != SUCCESS) {
      REPORT_CALL_ERROR("E19999", "Invoke CalcTensorSize failed when ShapeInferenceEngine %s, node:%s, output:%zu.",
                        __FUNCTION__, node_item.NodeName().c_str(), output_index);
      GELOGE(ge::FAILED, "[Calc][TensorSize] failed for [%s], output %zu.",
             node_item.NodeName().c_str(), output_index);
      return status_result;
    }
    GELOGD("[%s] Tensor size of output %zu = %ld", node_item.NodeName().c_str(), output_index, tensor_size);
    (void) TensorUtils::SetSize(*tensor_desc, tensor_size);
  }
@@ -95,8 +95,8 @@ Status GraphItem::GroupNodes() {
    int group = node->group;
    if (group != last_group) {
      if (seen_groups.find(group) != seen_groups.end()) {
        GELOGE(INTERNAL_ERROR,
               "[Find][Group]Unordered node group found. node = %s, group = %d", node->NodeName().c_str(), group);
        return INTERNAL_ERROR;
      } else {
        last_group = group;
@@ -71,8 +71,10 @@ Status SetOutputNameAttr(ComputeGraph &graph) {
    }
  }
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&graph, ATTR_MODEL_OUT_NODES_NAME, output_names),
                   GELOGE(FAILED, "[Invoke][SetListStr] failed, name:%s.", ATTR_MODEL_OUT_NODES_NAME.c_str());
                   REPORT_CALL_ERROR("E19999", "SetListStr failed when %s, name:%s.",
                                     __FUNCTION__, ATTR_MODEL_OUT_NODES_NAME.c_str());
                   return FAILED);
  return SUCCESS;
}
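GE_CHK_BOOL_EXEC above runs several statements in its failure branch: a log, an error report, and an early return, all passed as one macro argument. A minimal sketch of how such a check-and-exec macro can be built, assuming nothing about GE's actual definition (CHK_BOOL_EXEC is hypothetical):

#include <cstdio>

// Hypothetical check-and-exec macro in the style used above; GE's real
// definition may differ. Semicolon-separated statements travel as a single
// macro argument because only top-level commas split macro arguments.
#define CHK_BOOL_EXEC(expr, exec_expr) \
  do {                                 \
    if (!(expr)) {                     \
      exec_expr;                       \
    }                                  \
  } while (false)

static int SetAttrChecked(bool set_ok) {
  CHK_BOOL_EXEC(set_ok,
                std::fprintf(stderr, "SetListStr failed\n");
                return -1);  // multiple statements, then an early return
  return 0;
}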
@@ -109,10 +111,11 @@ Status CollectDependenciesForFusedGraph(NodeItem &node_item, std::set<OpDesc *>
      GE_CHECK_NOTNULL(src_op_desc);
      if (src_node->GetType() != DATA_TYPE) {
        GELOGE(UNSUPPORTED,
               "[Check][NodeType][%s::%s] Node in fused subgraph can only depend on Data nodes,"
               " but depends on %s actually",
               node_item.NodeName().c_str(), node->GetName().c_str(), src_node->GetType().c_str());
        REPORT_INNER_ERROR("E19999", "[%s::%s] Node in fused subgraph can only depend on Data nodes,"
                           " but depends on %s actually, check invalid when %s.",
                           node_item.NodeName().c_str(), node->GetName().c_str(), src_node->GetType().c_str(),
                           __FUNCTION__);
        return UNSUPPORTED;
      }
@@ -134,12 +137,13 @@ Status HybridModelBuilder::Build() {
  hybrid_model_.model_name_ = ge_root_model_->GetRootGraph()->GetName();
  GELOGI("[%s] Start to build hybrid model.", GetGraphName());
  GE_CHK_STATUS_RET(InitRuntimeParams(), "[Invoke][InitRuntimeParams] failed, model_name_:[%s]", GetGraphName());
  GE_CHK_STATUS_RET(RecoverGraphUnknownFlag(),
                    "[Invoke][RecoverGraphUnknownFlag] failed, model_name_:[%s]", GetGraphName());
  GE_CHK_STATUS_RET(IndexSpecialNodes(), "[Invoke][IndexSpecialNodes] failed, model_name_:[%s]", GetGraphName());
  GE_CHK_STATUS_RET(IndexTaskDefs(), "[Invoke][IndexTaskDefs] failed, model_name_:[%s]", GetGraphName());
  GE_CHK_STATUS_RET(InitWeights(), "[Invoke][InitWeights] failed, model_name_:[%s]", GetGraphName());
  GE_CHK_STATUS_RET(LoadGraph(), "[Invoke][LoadGraph] failed, model_name_:[%s]", GetGraphName());
  GE_CHK_STATUS_RET(AssignUninitializedConstantOps(),
                    "[Invoke][AssignUninitializedConstantOps] failed, model_name_:[%s]", GetGraphName());
  GE_CHK_STATUS_RET(TransAllVarData(), "[Invoke][TransAllVarData] failed, model_name_:[%s]", GetGraphName());
  GE_CHK_STATUS_RET(CopyVarData(), "[Invoke][CopyVarData] failed, model_name_:[%s]", GetGraphName());
@@ -335,6 +339,8 @@ Status HybridModelBuilder::ParseDependentInputNodes(NodeItem &node_item, const s
    if (input_index < 0) {
      GELOGE(INTERNAL_ERROR, "[Get][InputIndex]failed, node:[%s] inputname: %s.",
             node_item.NodeName().c_str(), input_name.c_str());
      REPORT_CALL_ERROR("E19999", "GetInputIndexByName failed when HybridModelBuilder %s, node:[%s] inputname: %s.",
                        __FUNCTION__, node_item.NodeName().c_str(), input_name.c_str());
      return INTERNAL_ERROR;
    }
@@ -423,20 +429,24 @@ Status HybridModelBuilder::UpdateAnchorStatus(const NodePtr &node) {
    auto peer_anchor = anchor->GetPeerOutAnchor();
    if (peer_anchor == nullptr) {
      if (AnchorUtils::SetStatus(anchor, ANCHOR_SUSPEND) != GRAPH_SUCCESS) {
        GELOGE(INTERNAL_ERROR, "[Invoke][SetStatus] failed to set ANCHOR_SUSPEND, node:[%s].",
               node->GetName().c_str());
        REPORT_CALL_ERROR("E19999", "SetStatus failed to set ANCHOR_SUSPEND, node:[%s] when HybridModelBuilder %s.",
                          node->GetName().c_str(), __FUNCTION__);
        return INTERNAL_ERROR;
      }
    } else if (peer_anchor->GetOwnerNode()->GetType() == CONSTANT) {
      if (AnchorUtils::SetStatus(anchor, ANCHOR_CONST) != GRAPH_SUCCESS) {
        GELOGE(INTERNAL_ERROR, "[Invoke][SetStatus] failed to set ANCHOR_CONST, node:[%s].", node->GetName().c_str());
        REPORT_CALL_ERROR("E19999", "SetStatus failed to set ANCHOR_CONST, node:[%s] when HybridModelBuilder %s.",
                          node->GetName().c_str(), __FUNCTION__);
        return INTERNAL_ERROR;
      }
    } else {
      if (AnchorUtils::SetStatus(anchor, ANCHOR_DATA) != GRAPH_SUCCESS) {
        GELOGE(INTERNAL_ERROR, "[Invoke][SetStatus] failed to set ANCHOR_DATA, node:[%s].", node->GetName().c_str());
        REPORT_CALL_ERROR("E19999", "SetStatus failed to set ANCHOR_DATA, node:[%s] when HybridModelBuilder %s.",
                          node->GetName().c_str(), __FUNCTION__);
        return INTERNAL_ERROR;
      }
    }
@@ -563,6 +573,8 @@ Status HybridModelBuilder::MergeNetOutputNode(ComputeGraph &graph) {
    if (input_desc == nullptr) {
      GELOGE(INTERNAL_ERROR, "[Invoke][MutableInputDesc][%s] Failed to get input desc[%d]",
             net_output_desc->GetName().c_str(), index);
      REPORT_CALL_ERROR("E19999", "[%s] Failed to get input desc[%d] when HybridModelBuilder %s.",
                        net_output_desc->GetName().c_str(), index, __FUNCTION__);
      return INTERNAL_ERROR;
    }
@@ -763,7 +775,7 @@ Status HybridModelBuilder::LoadGraph() {
    GELOGI("Before merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu",
           root_graph->GetDirectNodesSize(),
           root_graph->GetAllNodesSize());
    GE_CHK_GRAPH_STATUS_RET(UnfoldSubgraphs(root_graph, merged_graph),
                            "[Invoke][UnfoldSubgraphs]Failed to unfold subgraphs, model_name_:%s.", GetGraphName());
    root_graph = std::move(merged_graph);
    GELOGI("After merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu",
@@ -786,7 +798,7 @@ Status HybridModelBuilder::LoadGraph() {
    op_desc->SetId(index++);
  }
  GE_DUMP(root_graph, "hybrid_merged_graph");
  GE_CHK_STATUS_RET(LoadDynamicSubgraph(*root_graph, true),
                    "[Invoke][LoadDynamicSubgraph]Failed to load root graph, model_name_:%s.", GetGraphName());
  GELOGD("Done loading root graph successfully.");
  GE_CHK_STATUS_RET(hybrid_model_.root_graph_item_->GroupNodes(),
@@ -825,7 +837,7 @@ Status HybridModelBuilder::LoadGraph() {
    }
  }
  GE_CHK_STATUS_RET(ParseDependentByParallelGroup(),
                    "[Invoke][ParseDependentByParallelGroup]Failed to establish dependencies for hccl ops, model_name_:%s.",
                    GetGraphName());
  GELOGI("Done loading all subgraphs successfully.");
@@ -941,7 +953,7 @@ Status HybridModelBuilder::InitConstantOps() {
    auto op_desc = var_node->GetOpDesc();
    auto v_weights = ModelUtils::GetWeights(op_desc);
    if (v_weights.empty()) {
      GELOGE(INTERNAL_ERROR, "[Check][Size][%s] Constant op has no weight", var_node->GetName().c_str());
      return INTERNAL_ERROR;
    }
    auto *ge_tensor = const_cast<GeTensor *>(v_weights[0].get());
@@ -955,7 +967,7 @@ Status HybridModelBuilder::InitConstantOps() {
      GELOGD("Init tensor with host constant %s size = %zu", var_name.c_str(), aligned_tensor.MutableData().GetSize());
      if (MemManager::Instance().HostMemInstance(RT_MEMORY_HBM).Malloc(aligned_tensor.GetAlignedPtr(),
                                                                       aligned_tensor.GetData().size()) == nullptr) {
        GELOGE(MEMALLOC_FAILED, "[Malloc][HostMemory] for an existing GeTensor failed, model_name_:%s.", GetGraphName());
        return MEMALLOC_FAILED;
      }
      var_tensor.reset(new(std::nothrow)TensorValue(aligned_tensor.MutableData().data(),
@@ -1006,6 +1018,8 @@ Status HybridModelBuilder::InitVariableTensors() {
    int64_t tensor_size = 0;
    if (TensorUtils::CalcTensorMemSize(output_tensor.GetShape(), output_tensor.GetFormat(), output_tensor.GetDataType(),
                                       tensor_size) != SUCCESS) {
      REPORT_CALL_ERROR("E19999", "CalcTensorMemSize failed when HybridModelBuilder %s, node name:%s",
                        __FUNCTION__, it.first.c_str());
      GELOGE(INTERNAL_ERROR, "[Calculate][TensorMemSize] failed, node name:%s", it.first.c_str());
      return INTERNAL_ERROR;
    }
@@ -1016,7 +1030,7 @@ Status HybridModelBuilder::InitVariableTensors() {
    }
    if (MemManager::Instance().HostMemInstance(RT_MEMORY_HBM).Malloc(mem_info.host_aligned_ptr,
                                                                     tensor_size) == nullptr) {
      GELOGE(MEMALLOC_FAILED,
             "[Malloc][HostMem] for an existing GeTensor failed, Host variable [%s].", it.first.c_str());
      return MEMALLOC_FAILED;
    }
@@ -1078,7 +1092,7 @@ Status HybridModelBuilder::InitWeights() {
    const GeTensorDesc &tensor_desc = ge_tensor->GetTensorDesc();
    int64_t tensor_size = 0;
    GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetSize(*op_desc->MutableOutputDesc(0), tensor_size),
                            "[Invoke][GetSize][%s] Failed to get output tensor size",
                            node->GetName().c_str());
    int64_t data_offset = 0;
    GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetDataOffset(tensor_desc, data_offset),
@@ -1333,16 +1347,18 @@ Status HybridModelBuilder::GetPeerNodeAcrossSubGraphs(const NodePtr &data_node,
  GELOGD("To get peer node of %s::%s", sub_graph->GetName().c_str(), data_node->GetName().c_str());
  auto wrapped_node = data_node->GetOwnerComputeGraph()->GetParentNode();
  if (wrapped_node == nullptr) {
    REPORT_INNER_ERROR("E19999", "[%s] Node is in root graph when HybridModelBuilder %s.",
                       data_node->GetName().c_str(), __FUNCTION__);
    GELOGE(INTERNAL_ERROR, "[Invoke][GetParentNode][%s] Node is in root graph.", data_node->GetName().c_str());
    return INTERNAL_ERROR;
  }
  auto data_op_desc = data_node->GetOpDesc();
  uint32_t parent_index = 0;
  if (!AttrUtils::GetInt(data_op_desc, ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
    REPORT_CALL_ERROR("E19999", "[%s] Failed to get attr [%s] when HybridModelBuilder %s.",
                      data_op_desc->GetName().c_str(), ATTR_NAME_PARENT_NODE_INDEX.c_str(), __FUNCTION__);
    GELOGE(INTERNAL_ERROR, "[Invoke][GetInt][%s] Failed to get attr [%s]",
           data_op_desc->GetName().c_str(), ATTR_NAME_PARENT_NODE_INDEX.c_str());
    return INTERNAL_ERROR;
  }
@@ -1350,7 +1366,9 @@ Status HybridModelBuilder::GetPeerNodeAcrossSubGraphs(const NodePtr &data_node,
  GE_CHECK_NOTNULL(wrapped_node_in_anchor);
  auto src_out_anchor = wrapped_node_in_anchor->GetPeerOutAnchor();
  if (src_out_anchor == nullptr || src_out_anchor->GetOwnerNode() == nullptr) {
    REPORT_INNER_ERROR("E19999", "[%s] Parent node does not have a peer anchor when HybridModelBuilder %s.",
                       data_node->GetName().c_str(), __FUNCTION__);
    GELOGE(INTERNAL_ERROR,
           "[Check][ParentNode][%s] Parent node does not have a peer anchor.", data_node->GetName().c_str());
    return INTERNAL_ERROR;
  }
@@ -1374,10 +1392,13 @@ Status HybridModelBuilder::GetPeerNodeAcrossSubGraphs(const NodePtr &data_node,
  auto src_graph = NodeUtils::GetSubgraph(*src_wrapped_node, kSubgraphIndex);
  GE_CHECK_NOTNULL(src_graph);
  auto src_net_output_node = src_graph->FindFirstNodeMatchType(NETOUTPUT);
  if (src_net_output_node == nullptr) {
    REPORT_INNER_ERROR("E19999", "Failed to find NetOutput in subgraph: %s when HybridModelBuilder %s",
                       src_graph->GetName().c_str(), __FUNCTION__);
    GELOGE(INTERNAL_ERROR, "[Invoke][FindFirstNodeMatchType]Failed to find NetOutput in subgraph: %s",
           src_graph->GetName().c_str());
    return INTERNAL_ERROR;
  }
  auto net_output_desc = src_net_output_node->GetOpDesc();
  GE_CHECK_NOTNULL(net_output_desc);
@@ -1567,10 +1588,10 @@ Status HybridModelBuilder::GetParentNodeOutputIndex(const OpDesc &op_desc, int i
  auto input_desc = op_desc.MutableInputDesc(index);
  GE_CHECK_NOTNULL(input_desc);
  if (!AttrUtils::GetInt(input_desc, ATTR_NAME_PARENT_NODE_INDEX, out_index)) {
    GELOGE(INTERNAL_ERROR, "[Invoke][GetInt]NetOutput %s input tensor %d, attr %s not found.",
           op_desc.GetName().c_str(), index, ATTR_NAME_PARENT_NODE_INDEX.c_str());
    REPORT_CALL_ERROR("E19999", "NetOutput %s input tensor %d, attr %s not found when %s.",
                      op_desc.GetName().c_str(), index, ATTR_NAME_PARENT_NODE_INDEX.c_str(), __FUNCTION__);
    return INTERNAL_ERROR;
  }
  return SUCCESS;
@@ -2026,10 +2047,10 @@ Status HybridModelBuilder::BuildInputMapping(GraphItem &graph_item,
      data_op_index++;
    } else {
      if (!AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, data_index)) {
        GELOGE(FAILED, "[Invoke][GetInt][%s] Failed to get attr [%s]",
               node->GetName().c_str(), ATTR_NAME_PARENT_NODE_INDEX.c_str());
        REPORT_CALL_ERROR("E19999", "call GetInt failed when HybridModelBuilder %s, [%s] Failed to get attr [%s]",
                          __FUNCTION__, node->GetName().c_str(), ATTR_NAME_PARENT_NODE_INDEX.c_str());
        return FAILED;
      }
    }