
Modify code to add error report messages

tags/v1.3.0
liudingyan, 3 years ago
commit bab2821062
12 changed files with 147 additions and 127 deletions:

  1. ge/hybrid/common/npu_memory_allocator.cc (+9, -7)
  2. ge/hybrid/common/tensor_value.cc (+2, -2)
  3. ge/hybrid/executor/hybrid_execution_context.cc (+1, -1)
  4. ge/hybrid/executor/hybrid_model_async_executor.cc (+65, -52)
  5. ge/hybrid/executor/hybrid_model_executor.cc (+1, -1)
  6. ge/hybrid/executor/hybrid_model_pipeline_executor.cc (+9, -11)
  7. ge/hybrid/executor/hybrid_profiler.cc (+2, -2)
  8. ge/hybrid/executor/node_done_manager.cc (+2, -2)
  9. ge/hybrid/executor/node_state.cc (+9, -9)
  10. ge/hybrid/executor/worker/execution_engine.cc (+20, -20)
  11. ge/hybrid/executor/worker/shape_inference_engine.cc (+26, -18)
  12. ge/hybrid/executor/worker/task_compile_engine.cc (+1, -2)
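
All twelve files apply the same error-reporting pattern: the GELOGE log text drops the trailing "when %s." / __FUNCTION__ clause (often gaining extra context such as model_id or the offending size instead), while a REPORT_INNER_ERROR or REPORT_CALL_ERROR call with error code "E19999" is added or kept so the failure also reaches the error-report channel. A minimal before/after sketch of that pattern follows; it is illustrative only, reuses the GELOGE / REPORT_INNER_ERROR macros and the rtGetDevice call exactly as they appear in the diffs below, and the QueryDeviceIdOld / QueryDeviceIdNew wrappers are hypothetical names, not functions from the repository.

// Sketch (hypothetical wrapper): the old style folded __FUNCTION__ into the log message only.
Status QueryDeviceIdOld(int32_t &device_id) {
  if (rtGetDevice(&device_id) != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "[Get][Device] Failed when %s.", __FUNCTION__);
    return RT_FAILED;
  }
  return SUCCESS;
}

// Sketch (hypothetical wrapper): the new style keeps the log message self-contained and
// reports the same failure via REPORT_INNER_ERROR, where __FUNCTION__ is still recorded.
Status QueryDeviceIdNew(int32_t &device_id) {
  if (rtGetDevice(&device_id) != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "[Get][Device] Failed.");
    REPORT_INNER_ERROR("E19999", "rtGetDevice failed when %s.", __FUNCTION__);
    return RT_FAILED;
  }
  return SUCCESS;
}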

ge/hybrid/common/npu_memory_allocator.cc (+9, -7)

@@ -39,7 +39,7 @@ AllocationAttr::AllocationAttr(void *try_reuse_addr) : AllocationAttr(0, try_reu
NpuMemoryAllocator *NpuMemoryAllocator::GetAllocator() {
int32_t device_id = 0;
if (rtGetDevice(&device_id) != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "[Get][Device] Failed when %s.", __FUNCTION__);
GELOGE(RT_FAILED, "[Get][Device] Failed.");
REPORT_INNER_ERROR("E19999", "rtGetDevice failed when %s.", __FUNCTION__);
return nullptr;
}
@@ -58,8 +58,8 @@ void *NpuMemoryAllocator::Allocate(std::size_t size, AllocationAttr *attr) {
}

if (allocate_size == 0) {
GELOGE(MEMALLOC_FAILED, "[Check][Param:size_t]Memory size is 0, device_id = %u, size = %zu when %s.",
device_id_, allocate_size, __FUNCTION__);
GELOGE(MEMALLOC_FAILED, "[Check][Param:size_t]Memory size is 0, device_id = %u, size = %zu.",
device_id_, allocate_size);
REPORT_INNER_ERROR("E19999", "Memory size is 0, device_id = %u, size = %zu when %s.",
device_id_, allocate_size, __FUNCTION__);
return nullptr;
@@ -72,8 +72,10 @@ void *NpuMemoryAllocator::Allocate(std::size_t size, AllocationAttr *attr) {
buffer = MemManager::Instance().HostMemInstance(RT_MEMORY_HBM).Malloc(allocate_size);
} else {
if (allocate_size > kMaxHbmMemorySize) {
GELOGE(PARAM_INVALID, "[Check][Param:size_t]Invalid HBM memory size: %zu when %s.", allocate_size, __FUNCTION__);
REPORT_CALL_ERROR("E19999", "Invalid HBM memory size: %zu when %s.", allocate_size, __FUNCTION__);
GELOGE(PARAM_INVALID, "[Check][Param:size_t]Invalid HBM memory size: %zu bigger than limit:%lu, check invalid.",
allocate_size, kMaxHbmMemorySize);
REPORT_CALL_ERROR("E19999", "Invalid HBM memory size: %zu bigger than limit:%lu, check invalid when %s.",
allocate_size, kMaxHbmMemorySize, __FUNCTION__);
return nullptr;
}
void *try_reuse_addr = nullptr;
@@ -92,8 +94,8 @@ void *NpuMemoryAllocator::Allocate(std::size_t size, AllocationAttr *attr) {
.Malloc(allocate_size, reinterpret_cast<uint8_t *>(try_reuse_addr), device_id_);
}
if (buffer == nullptr) {
GELOGE(MEMALLOC_FAILED, "[Malloc][Memory] Failed, device_id = %u, size = %zu when %s.",
device_id_, allocate_size, __FUNCTION__);
GELOGE(MEMALLOC_FAILED, "[Malloc][Memory] Failed, device_id = %u, size = %zu.",
device_id_, allocate_size);
REPORT_CALL_ERROR("E19999", "malloc memory failed, device_id = %u, size = %zu when %s.",
device_id_, allocate_size, __FUNCTION__);
return nullptr;


ge/hybrid/common/tensor_value.cc (+2, -2)

@@ -32,7 +32,7 @@ std::unique_ptr<TensorBuffer> TensorBuffer::Create(NpuMemoryAllocator *allocator
}

if (allocator == nullptr) {
GELOGE(INTERNAL_ERROR, "[Check][Param:NpuMemoryAllocator] allocator is NULL, when %s.", __FUNCTION__);
GELOGE(INTERNAL_ERROR, "[Check][Param:NpuMemoryAllocator] allocator is NULL.");
REPORT_INNER_ERROR("E19999", "input allocator is NULL, when %s.", __FUNCTION__);
return nullptr;
}
@@ -43,7 +43,7 @@ std::unique_ptr<TensorBuffer> TensorBuffer::Create(NpuMemoryAllocator *allocator
}
buffer = allocator->Allocate(size, attr);
if (buffer == nullptr) {
GELOGE(MEMALLOC_FAILED, "[Allocate][Memory] Failed. size = %zu, when %s.", size, __FUNCTION__);
GELOGE(MEMALLOC_FAILED, "[Allocate][Memory] Failed. size = %zu.", size);
REPORT_CALL_ERROR("E19999", "allocate failed, size = %zu, when %s.", size, __FUNCTION__);
return nullptr;
}


ge/hybrid/executor/hybrid_execution_context.cc (+1, -1)

@@ -60,7 +60,7 @@ Status GraphExecutionContext::Synchronize(rtStream_t rt_stream) {
}

GELOGE(RT_FAILED,
"[Invoke][rtStreamSynchronize] failed when GraphExecutionContext %s, ret = %d", __FUNCTION__, rt_ret);
"[Invoke][rtStreamSynchronize] failed, ret = %d", rt_ret);
REPORT_CALL_ERROR("E19999",
"invoke rtStreamSynchronize failed when GraphExecutionContext %s, ret = %d", __FUNCTION__, rt_ret);
return RT_FAILED;


ge/hybrid/executor/hybrid_model_async_executor.cc (+65, -52)

@@ -51,8 +51,13 @@ void HybridModelAsyncExecutor::SetModelName(const string &model_name) {
}

Status HybridModelAsyncExecutor::EnqueueData(const shared_ptr<InputDataWrapper> &data) {
GE_CHK_STATUS_EXEC(data_inputer_->Push(data), return domi::DATA_QUEUE_ISFULL,
"[Push][Data] Data queue is full, please call again later, model_id %u ", model_id_);
if(data_inputer_->Push(data) != SUCCESS){
REPORT_CALL_ERROR("E19999", "Data queue is full, please call again later when %s, model_id %u.",
__FUNCTION__, model_id_);
GELOGE(domi::DATA_QUEUE_ISFULL,
"[Push][Data] Data queue is full, please call again later, model_id %u ", model_id_);
return domi::DATA_QUEUE_ISFULL;
}
GELOGD("EnqueueData successfully. model_id = %u, data_index = %u", data->GetInput().model_id, data->GetInput().index);
return SUCCESS;
}
@@ -60,9 +65,12 @@ Status HybridModelAsyncExecutor::EnqueueData(const shared_ptr<InputDataWrapper>
Status HybridModelAsyncExecutor::Start(const std::shared_ptr<ModelListener> &listener) {
GELOGD("HybridModelExecutor::Start IN, has listener = %d", listener != nullptr);
std::lock_guard<std::mutex> lk(mu_);
GE_CHK_BOOL_RET_STATUS(!run_flag_, INTERNAL_ERROR,
"[Check][RunState] Model already started when HybridModelAsyncExecutor %s.", __FUNCTION__);

if(run_flag_){
REPORT_INNER_ERROR("E19999", "Model already started when HybridModelAsyncExecutor %s, model_id:%u.",
__FUNCTION__, model_id_);
GELOGE(INTERNAL_ERROR, "[Check][RunState] Model already started, model_id:%u.", model_id_);
return INTERNAL_ERROR;
}
run_flag_ = true;
listener_ = listener;
future_ = std::async(std::launch::async, [&]() -> Status {
@@ -73,7 +81,7 @@ Status HybridModelAsyncExecutor::Start(const std::shared_ptr<ModelListener> &lis
});

GE_CHK_BOOL_RET_STATUS(future_.valid(), INTERNAL_ERROR,
"[Check][RunState] Failed to start when HybridModelAsyncExecutor %s.", __FUNCTION__);
"[Check][RunState] Failed to start, model_id:%u.", model_id_);
GELOGD("HybridModelExecutor::Start successfully");
return SUCCESS;
}
@@ -108,8 +116,8 @@ Status HybridModelAsyncExecutor::Init() {
executor_ = std::unique_ptr<HybridModelExecutor>(new(std::nothrow) HybridModelExecutor(model_, device_id_, stream_));
GE_CHECK_NOTNULL(executor_);
GE_CHK_STATUS_RET(executor_->Init(),
"[Init][HybridModelExecutor] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
GE_CHK_STATUS_RET(DumpOpDebug(), "[Dump][OpDebug] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
"[Init][HybridModelExecutor] failed, model_id:%u.", model_id_);
GE_CHK_STATUS_RET(DumpOpDebug(), "[Dump][OpDebug] failed, model_id:%u.", model_id_);

GELOGI("HybridModel stage nums:%zu", model_->GetRootGraphItem()->NumGroups());
if (model_->GetRootGraphItem()->NumGroups() >= kMinimumPiplineStages) {
@@ -117,19 +125,19 @@ Status HybridModelAsyncExecutor::Init() {
std::unique_ptr<HybridModelPipelineExecutor>(new(std::nothrow) HybridModelPipelineExecutor(model_, device_id_));
GE_CHECK_NOTNULL(pipe_executor_);
GE_CHK_STATUS_RET(pipe_executor_->Init(),
"[Init][HybridModelPipelineExecutor] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
"[Init][HybridModelPipelineExecutor] failed, model_id:%u.", model_id_);
}

GE_CHK_STATUS_RET(InitInputDesc(), "[Init][InputDesc] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
GE_CHK_STATUS_RET(InitInputDesc(), "[Init][InputDesc] failed, model_id:%u.", model_id_);

return SUCCESS;
}

Status HybridModelAsyncExecutor::PreRun(InputData &current_data, HybridModelExecutor::ExecuteArgs &args) {
GE_CHK_STATUS_RET(SyncVarData(), "[Invoke][SyncVarData] failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
GE_CHK_STATUS_RET(SyncVarData(), "[Invoke][SyncVarData] failed, model_id:%u.", model_id_);
RECORD_MODEL_EXECUTION_EVENT(executor_->GetContext(), "[SyncVarData] End");
GE_CHK_STATUS_RET(PrepareInputs(current_data, args),
"[Invoke][PrepareInputs] failed to copy input data to model when HybridModelAsyncExecutor %s.", __FUNCTION__);
"[Invoke][PrepareInputs] failed to copy input data to model, model_id:%u.", model_id_);
RECORD_MODEL_EXECUTION_EVENT(executor_->GetContext(), "[CopyInputData] End");
return SUCCESS;
}
@@ -160,7 +168,7 @@ Status HybridModelAsyncExecutor::RunInternal() {
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
ret != SUCCESS, (void) HandleResult(ret, current_data.index, args, data_wrapper->GetOutput());
CsaInteract::GetInstance().StoreInternalErrorCode(ret, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
continue, "[Invoke][PreRun] failed when HybridModelAsyncExecutor %s.", __FUNCTION__); // [No need to check value]
continue, "[Invoke][PreRun] failed, model_id:%u.", model_id_); // [No need to check value]

if (pipe_executor_ != nullptr) {
GELOGI("HybridModel will execute in pipeline mode");
@@ -204,9 +212,7 @@ Status HybridModelAsyncExecutor::HandleResult(Status exec_ret,
}

if (exec_ret != SUCCESS) {
GELOGE(exec_ret,
"[Check][Param:Status] failed to execute graph when HybridModelAsyncExecutor %s. model_id = %u",
__FUNCTION__, model_id_);
GELOGE(exec_ret, "[Check][Param:Status] failed to execute graph. model_id = %u", model_id_);
REPORT_INNER_ERROR("E19999",
"failed to execute graph when HybridModelAsyncExecutor %s. model_id = %u", __FUNCTION__, model_id_);
return OnComputeDone(data_id, INTERNAL_ERROR, output_tensor_info_list);
@@ -245,11 +251,11 @@ Status HybridModelAsyncExecutor::SyncVarData() {
Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, HybridModelExecutor::ExecuteArgs &args) {
if (current_data.blobs.size() < input_tensor_desc_.size()) {
GELOGE(PARAM_INVALID,
"[Check][Size]Blob size mismatches, expect at least %zu, but got %zu when HybridModelAsyncExecutor %s.",
input_tensor_desc_.size(), current_data.blobs.size(), __FUNCTION__);
"[Check][Size]Blob size mismatches, expect at least %zu, but got %zu, model_id = %u",
input_tensor_desc_.size(), current_data.blobs.size(), model_id_);
REPORT_INNER_ERROR("E19999",
"Blob size mismatches, expect at least %zu, but got %zu when HybridModelAsyncExecutor %s.",
input_tensor_desc_.size(), current_data.blobs.size(), __FUNCTION__);
"Blob size mismatches, expect at least %zu, but got %zu when HybridModelAsyncExecutor %s, model_id = %u.",
input_tensor_desc_.size(), current_data.blobs.size(), __FUNCTION__, model_id_);
return PARAM_INVALID;
}

@@ -262,11 +268,11 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
if (is_input_dynamic_[input_index]) {
if (input_index >= current_data.shapes.size()) {
GELOGE(PARAM_INVALID,
"[Check][Range]Shape index out of range, index = %zu, shape size = %zu when HybridModelAsyncExecutor %s.",
input_index, current_data.shapes.size(), __FUNCTION__);
"[Check][Range]Shape index out of range, index = %zu, shape size = %zu model_id = %u.",
input_index, current_data.shapes.size(), model_id_);
REPORT_INNER_ERROR("E19999",
"Shape index out of range, index = %zu, shape size = %zu when HybridModelAsyncExecutor %s.",
input_index, current_data.shapes.size(), __FUNCTION__);
"Shape index out of range, index = %zu, shape size = %zu when HybridModelAsyncExecutor %s, model_id = %u.",
input_index, current_data.shapes.size(), __FUNCTION__, model_id_);
return PARAM_INVALID;
}
auto &tensor_desc = input_tensor_desc_[input_index];
@@ -274,7 +280,7 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
std::vector<std::pair<int64_t, int64_t>> range;
auto range_ret = tensor_desc->GetShapeRange(range);
GE_CHK_BOOL_RET_STATUS(range_ret == GRAPH_SUCCESS, INTERNAL_ERROR,
"[Invoke][GetShapeRange] failed, ret=%u.", range_ret);
"[Invoke][GetShapeRange] failed, ret=%u, model_id = %u.", range_ret, model_id_);
for (size_t k = 0; k < range.size(); ++k) {
if (k >= shape.GetDimNum()) {
break;
@@ -282,11 +288,11 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
// range[k].second can be -1
if (shape.GetDim(k) < range[k].first || (range[k].second >= 0 && shape.GetDim(k) > range[k].second)) {
GELOGE(PARAM_INVALID,
"[Check][Range]Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld]",
input_index, k, shape.GetDim(k), range[k].first, range[k].second);
"[Check][Range]Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld], model_id = %u.",
input_index, k, shape.GetDim(k), range[k].first, range[k].second, model_id_);
REPORT_INNER_ERROR("E19999",
"Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld]",
input_index, k, shape.GetDim(k), range[k].first, range[k].second);
"Dim out of range, shape idx = %zu, dim idx = %zu, dim = %ld, range = [%ld, %ld], model_id = %u.",
input_index, k, shape.GetDim(k), range[k].first, range[k].second, model_id_);
return PARAM_INVALID;
}
}
@@ -294,8 +300,8 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
args.input_desc[input_index] = tensor_desc;
GELOGD("Update shape of input[%zu] to [%s]", input_index, tensor_desc->MutableShape().ToString().c_str());
GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorMemorySizeInBytes(*tensor_desc, tensor_size),
"[Invoke][GetTensorMemorySizeInBytes]Failed to calc tensor size, index = %zu, shape = [%s]",
input_index, tensor_desc->GetShape().ToString().c_str());
"[Invoke][GetTensorMemorySizeInBytes]Failed to calc tensor size, index = %zu, shape = [%s], model_id = %u.",
input_index, tensor_desc->GetShape().ToString().c_str(), model_id_);
GELOGD("Input tensor[%zu] size = %zu", input_index, tensor_size);
}

@@ -311,12 +317,16 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
GELOGD("To copy input data for input[%zu]", input_index);
const DataBuffer &data_buf = blobs[input_index];
auto mem_size = static_cast<uint64_t>(tensor_size);
GE_CHK_BOOL_RET_STATUS(mem_size >= data_buf.length,
PARAM_INVALID,
"[Check][Size]input data size(%lu) does not match model required size(%lu), ret failed.",
data_buf.length,
mem_size);

if(mem_size < data_buf.length){
REPORT_INNER_ERROR("E19999",
"input data size(%lu) does not match model required size(%lu) when %s, ret failed, model_id = %u.",
data_buf.length, mem_size, __FUNCTION__, model_id_);
GELOGE(PARAM_INVALID,
"[Check][Size]input data size(%lu) does not match model required size(%lu), ret failed, model_id = %u.",
data_buf.length, mem_size, model_id_);
return PARAM_INVALID;
}
if (data_buf.length > 0) {
GELOGI("[IMAS]CopyPlainData memcpy graph_%u type[F] output[%zu] memaddr[%p] mem_size[%zu] datasize[%lu]",
model_->root_runtime_param_.graph_id,
@@ -371,7 +381,7 @@ Status HybridModelAsyncExecutor::OnComputeDone(uint32_t data_index, uint32_t res
GELOGD("OnComputeDone. model id = %u, data index = %u, execution ret = %u", model_id_, data_index, result_code);
if (listener_ != nullptr) {
GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_index, result_code, outputs),
"[Invoke][OnComputeDone] failed.");
"[Invoke][OnComputeDone] failed, model_id = %u.", model_id_);
}

return result_code;
@@ -385,12 +395,11 @@ Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &a
std::vector<TensorValue> &output_tensors = args.outputs;
if (output_tensor_desc_list.size() != output_tensors.size()) {
GELOGE(INTERNAL_ERROR,
"[Check][Size]Output sizes mismatch. From op_desc = %zu, and from output tensors = %zu "
"when HybridModelAsyncExecutor %s.",
output_tensor_desc_list.size(), output_tensors.size(), __FUNCTION__);
REPORT_INNER_ERROR("E19999", "Output sizes mismatch. From op_desc = %zu, and from output tensors = %zu "
"when HybridModelAsyncExecutor %s.",
output_tensor_desc_list.size(), output_tensors.size(), __FUNCTION__);
"[Check][Size]Output sizes mismatch. From op_desc = %zu, and from output tensors = %zu, model_id = %u.",
output_tensor_desc_list.size(), output_tensors.size(), model_id_);
REPORT_INNER_ERROR("E19999", "Output sizes mismatch. From op_desc = %zu, and from output tensors = %zu, "
"when HybridModelAsyncExecutor %s, model_id = %u.",
output_tensor_desc_list.size(), output_tensors.size(), __FUNCTION__, model_id_);
return INTERNAL_ERROR;
}

@@ -422,10 +431,12 @@ Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &a
GE_CHECK_LE(output_size, UINT32_MAX);
if (output_tensor.GetSize() < static_cast<size_t>(output_size)) {
GELOGE(INTERNAL_ERROR,
"[Check][Size]output[%zu] tensor size(%zu) is not enough for output shape [%s]",
i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str());
REPORT_INNER_ERROR("E19999", "output[%zu] tensor size(%zu) is not enough for output shape [%s]",
i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str());
"[Check][Size]output[%zu] tensor size(%zu) is not enough for output shape [%s], model_id = %u.",
i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str(), model_id_);
REPORT_INNER_ERROR("E19999",
"output[%zu] tensor size(%zu) is not enough for output shape [%s] model_id = %u,"
" when HybridModelAsyncExecutor %s.",
i, output_tensor.GetSize(), tensor_desc->GetShape().ToString().c_str(), model_id_, __FUNCTION__);
return INTERNAL_ERROR;
}

@@ -481,7 +492,7 @@ Status HybridModelAsyncExecutor::Execute(const std::vector<DataBuffer> &inputs,
args.input_desc.emplace_back(tensor_desc_ptr);
}

GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed when HybridModelAsyncExecutor %s.", __FUNCTION__);
GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed, model_id = %u.", model_id_);
for (const auto &output_tensor_desc : args.output_desc) {
output_desc.emplace_back(*output_tensor_desc);
}
@@ -502,14 +513,15 @@ Status HybridModelAsyncExecutor::Execute(const vector<GeTensor> &inputs, vector<
}

HybridModelExecutor::ExecuteArgs args;
GE_CHK_STATUS_RET(PrepareInputs(input_data, args), "[Invoke][PrepareInputs]Failed to copy input data to model");
GE_CHK_STATUS_RET(PrepareInputs(input_data, args),
"[Invoke][PrepareInputs]Failed to copy input data to model, model_id = %u", model_id_);
GELOGD("Done copying input data successfully.");
GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed.");
GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed, model_id = %u.", model_id_);

std::vector<ge::OutputTensorInfo> output_tensor_info_list;
OutputData output_data;
GE_CHK_STATUS_RET(CopyOutputs(args, &output_data, output_tensor_info_list),
"[Invoke][CopyOutputs]Failed to copy outputs.");
"[Invoke][CopyOutputs]Failed to copy outputs, model_id = %u.", model_id_);
GELOGD("Done copying output data successfully. output count = %zu", output_tensor_info_list.size());

int out_index = 0;
@@ -560,7 +572,8 @@ Status HybridModelAsyncExecutor::DumpOpDebug() {
loop_cond = const_cast<void *>(varible_loop_cond->GetData());
}
data_dumper_.SetLoopAddr(global_step, loop_per_iter, loop_cond);
GE_CHK_STATUS_RET(data_dumper_.LoadDumpInfo(), "[Invoke][LoadDumpInfo] failed in hybrid engine");
GE_CHK_STATUS_RET(data_dumper_.LoadDumpInfo(),
"[Invoke][LoadDumpInfo] failed in hybrid engine, model_id = %u.", model_id_);
GELOGD("Dump op debug SUCCESS in hybrid engine");
}
return SUCCESS;
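
Besides the message rewrites, several hunks in this file (EnqueueData, Start, PrepareInputs) unroll GE_CHK_STATUS_EXEC / GE_CHK_BOOL_RET_STATUS one-liners into explicit if-blocks, because only an explicit block leaves room to emit the report before returning. A hedged sketch of that second pattern, lifted from the EnqueueData hunk above (data_inputer_, model_id_ and domi::DATA_QUEUE_ISFULL are the members and constants already used there):

// Before: the macro logs and returns in one line, with no REPORT_* call.
// GE_CHK_STATUS_EXEC(data_inputer_->Push(data), return domi::DATA_QUEUE_ISFULL,
//                    "[Push][Data] Data queue is full, please call again later, model_id %u ", model_id_);

// After: the check is written out so REPORT_CALL_ERROR("E19999", ...) runs alongside GELOGE.
if (data_inputer_->Push(data) != SUCCESS) {
  REPORT_CALL_ERROR("E19999", "Data queue is full, please call again later when %s, model_id %u.",
                    __FUNCTION__, model_id_);
  GELOGE(domi::DATA_QUEUE_ISFULL,
         "[Push][Data] Data queue is full, please call again later, model_id %u.", model_id_);
  return domi::DATA_QUEUE_ISFULL;
}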


ge/hybrid/executor/hybrid_model_executor.cc (+1, -1)

@@ -72,7 +72,7 @@ Status HybridModelExecutor::Execute(HybridModelExecutor::ExecuteArgs &args) {
if (ret == END_OF_SEQUENCE) {
args.is_eos = true;
} else {
GE_CHK_STATUS_RET(ret, "[Invoke][ExecuteGraphInternal]Failed when HybridModelExecutor %s.", __FUNCTION__);
GE_CHK_STATUS_RET(ret, "[Invoke][ExecuteGraphInternal] Failed, ret:%d.", ret);
}
return SUCCESS;
}


ge/hybrid/executor/hybrid_model_pipeline_executor.cc (+9, -11)

@@ -59,8 +59,8 @@ Status StageExecutor::Start(const std::vector<TensorValue> &inputs, const std::v
task_queue_.Pop(task_info);
GELOGD("[Executor: %d] Got task, stage = %d, iteration = %ld", id_, task_info.stage, task_info.iteration);
if (task_info.iteration >= pipe_config_->iteration_end) {
GELOGE(INTERNAL_ERROR, "[Check][Range][Executor: %d] Unexpected iteration: %d when StageExecutor %s.",
id_, task_info.iteration, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "[Check][Range][Executor: %d] Unexpected iteration: %d.",
id_, task_info.iteration);
REPORT_INNER_ERROR("E19999", "[Executor: %d] Unexpected iteration: %d when StageExecutor %s.",
id_, task_info.iteration, __FUNCTION__);
return INTERNAL_ERROR;
@@ -97,10 +97,10 @@ Status StageExecutor::Start(const std::vector<TensorValue> &inputs, const std::v
auto sync_result = Synchronize();
if (sync_result != SUCCESS) {
GELOGE(sync_result,
"[Invoke][Synchronize][Executor: %d] Failed to sync result when StageExecutor %s. iteration = %d",
id_, __FUNCTION__, task_info.iteration);
REPORT_CALL_ERROR("E19999", "[Executor: %d] Failed to sync result when StageExecutor %s. iteration = %d",
id_, __FUNCTION__, task_info.iteration);
"[Invoke][Synchronize][Executor: %d] Failed to sync result:%d. iteration = %d",
id_, sync_result, task_info.iteration);
REPORT_CALL_ERROR("E19999", "[Executor: %d] Failed to sync result:%d when StageExecutor %s. iteration = %d",
id_, sync_result, __FUNCTION__, task_info.iteration);
context_.profiler->Dump(std::cout);
context_.callback_manager->Destroy();
RuntimeInferenceContext::DestroyContext(std::to_string(context_.context_id));
@@ -249,8 +249,7 @@ Status HybridModelPipelineExecutor::Execute(HybridModelExecutor::ExecuteArgs &ar
GELOGD("Start to sync result of executor[%zu]", i);
auto ret = futures[i].get();
if (ret != SUCCESS) {
GELOGE(ret, "[Check][Result][Executor: %zu] Failed to schedule tasks when HybridModelPipelineExecutor %s.",
i, __FUNCTION__);
GELOGE(ret, "[Check][Result][Executor: %zu] Failed to schedule tasks.", i);
REPORT_INNER_ERROR("E19999", "[Executor: %zu] Failed to schedule tasks when HybridModelPipelineExecutor %s.",
i, __FUNCTION__);
has_error = true;
@@ -260,8 +259,7 @@ Status HybridModelPipelineExecutor::Execute(HybridModelExecutor::ExecuteArgs &ar
ret = stage_executors_[i]->Synchronize();

if (ret != SUCCESS) {
GELOGE(ret, "[Invoke][Synchronize] failed for [Executor: %zu] when HybridModelPipelineExecutor %s.",
i, __FUNCTION__);
GELOGE(ret, "[Invoke][Synchronize] failed for [Executor: %zu].", i);
REPORT_CALL_ERROR("E19999", "[Executor: %zu] failed to Synchronize result when HybridModelPipelineExecutor %s.",
i, __FUNCTION__);
has_error = true;
@@ -279,7 +277,7 @@ Status HybridModelPipelineExecutor::Execute(HybridModelExecutor::ExecuteArgs &ar
iteration_ = config_.iteration_end;

if (has_error) {
GELOGE(FAILED, "[Check][Error]Error occurred while execution when HybridModelPipelineExecutor %s.", __FUNCTION__);
GELOGE(FAILED, "[Check][Error]Error occurred while execution.");
REPORT_INNER_ERROR("E19999", "Error occurred while execution when HybridModelPipelineExecutor %s.", __FUNCTION__);
return FAILED;
}


ge/hybrid/executor/hybrid_profiler.cc (+2, -2)

@@ -40,8 +40,8 @@ void HybridProfiler::RecordEvent(EventType event_type, const char *fmt, ...) {

char buf[kEventDescMax];
if (vsnprintf_s(buf, kEventDescMax, kEventDescMax - 1, fmt, args) == -1) {
GELOGE(FAILED, "[Parse][Param:fmt]Format %s failed when HybridProfiler %s.", fmt, __FUNCTION__);
REPORT_INNER_ERROR("E19999", "Parse Format %s failed when HybridProfiler %s.", fmt, __FUNCTION__);
GELOGE(FAILED, "[Parse][Param:fmt]Format %s failed.", fmt);
REPORT_CALL_ERROR("E19999", "Parse Format %s failed when HybridProfiler %s.", fmt, __FUNCTION__);
va_end(args);
return;
}


ge/hybrid/executor/node_done_manager.cc (+2, -2)

@@ -28,8 +28,8 @@ bool NodeDoneManager::Cond::Await() {
if (!cv_.wait_for(lk,
std::chrono::seconds(kDefaultWaitTimeoutInSec),
[&]() { return is_released_ || is_cancelled_; })) {
GELOGE(INTERNAL_ERROR, "[Invoke][wait_for]Wait timed out when %s.", __FUNCTION__);
REPORT_INNER_ERROR("E19999", "wait timed out when %s.", __FUNCTION__);
GELOGE(INTERNAL_ERROR, "[Invoke][wait_for]Wait timed out.");
REPORT_INNER_ERROR("E19999", "wait timed out[%d] when %s.", kDefaultWaitTimeoutInSec, __FUNCTION__);
return false;
}



ge/hybrid/executor/node_state.cc (+9, -9)

@@ -67,8 +67,8 @@ Status ShapeInferenceState::UpdateInputShape(int idx, const GeTensorDesc &target
Format format = input_desc.GetFormat();
DataType data_type = input_desc.GetDataType();
if (TensorUtils::CalcTensorMemSize(shape, format, data_type, tensor_size) != GRAPH_SUCCESS) {
GELOGE(FAILED, "[Invoke][CalcTensorMemSize] failed for [%s] when ShapeInferenceState %s.",
node_item.NodeName().c_str(), __FUNCTION__);
GELOGE(FAILED, "[Invoke][CalcTensorMemSize] failed for [%s].",
node_item.NodeName().c_str());
REPORT_CALL_ERROR("E19999", "CalcTensorMemSize failed for [%s] when ShapeInferenceState %s.",
node_item.NodeName().c_str(), __FUNCTION__);
return FAILED;
@@ -124,8 +124,8 @@ Status ShapeInferenceState::AwaitShapesReady(const GraphExecutionContext &contex
}

if (context.GetStatus() != SUCCESS) {
GELOGE(FAILED, "[Check][Status][%s] Await pending shape cancelled when %s.",
node_item.NodeName().c_str(), __FUNCTION__);
GELOGE(FAILED, "[Check][Status][%s] Await pending shape cancelled.",
node_item.NodeName().c_str());
REPORT_CALL_ERROR("E19999", "[%s] Await pending shape cancelled when %s.",
node_item.NodeName().c_str(), __FUNCTION__);
break;
@@ -133,10 +133,10 @@ Status ShapeInferenceState::AwaitShapesReady(const GraphExecutionContext &contex
}

if (!wait_success) {
GELOGE(FAILED, "[Check][Status][%s] Wait for shape timeout when %s.",
node_item.NodeName().c_str(), __FUNCTION__);
REPORT_CALL_ERROR("E19999", "[%s] Wait for shape timeout when %s.",
node_item.NodeName().c_str(), __FUNCTION__);
GELOGE(FAILED, "[Check][Status][%s] Wait for shape timeout:%d.",
node_item.NodeName().c_str(), kWaitInternal);
REPORT_CALL_ERROR("E19999", "[%s] Wait for shape timeout:%d when %s.",
node_item.NodeName().c_str(), kWaitInternal, __FUNCTION__);
return FAILED;
}
}
@@ -241,7 +241,7 @@ Status NodeState::WaitForPrepareDone() {
if (prepare_future_.valid()) {
GELOGD("[%s] Start to wait for prepare future.", GetName().c_str());
GE_CHK_STATUS_RET(prepare_future_.get(),
"[Check][Status][%s] PreRun failed when NodeState %s.", GetName().c_str(), __FUNCTION__);
"[Check][Status][%s] PreRun failed.", GetName().c_str());
}

return SUCCESS;


ge/hybrid/executor/worker/execution_engine.cc (+20, -20)

@@ -102,9 +102,9 @@ Status NodeDoneCallback::PrepareConstInputs(const NodeItem &node_item) {

if (output_tensor->GetSize() < static_cast<size_t>(tensor_size)) {
GELOGE(INTERNAL_ERROR,
"[Check][Size][%s] Tensor size is not enough. output index = %d, required size = %ld, tensor = %s when %s.",
"[Check][Size][%s] Tensor size is not enough. output index = %d, required size = %ld, tensor = %s.",
node_item.NodeName().c_str(), output_idx, tensor_size,
output_tensor->DebugString().c_str(), __FUNCTION__);
output_tensor->DebugString().c_str());
REPORT_INNER_ERROR("E19999",
"[%s] Tensor size is not enough. output index = %d, required size = %ld, tensor = %s when %s.",
node_item.NodeName().c_str(), output_idx, tensor_size,
@@ -175,7 +175,7 @@ Status NodeDoneCallback::GetTaskDescInfo(const NodePtr node, const HybridModel *
Status NodeDoneCallback::ProfilingReport() {
auto node = context_->GetNodeItem().node;
if (node == nullptr) {
GELOGE(PARAM_INVALID, "[Get][Node] value is nullptr when %s.", __FUNCTION__);
GELOGE(PARAM_INVALID, "[Get][Node] value is nullptr.");
REPORT_INNER_ERROR("E19999", "Get node failed, when %s.", __FUNCTION__);
return PARAM_INVALID;
}
@@ -193,7 +193,7 @@ Status NodeDoneCallback::ProfilingReport() {
std::vector<TaskDescInfo> task_desc_info;
auto profiling_ret = GetTaskDescInfo(node, model, task_desc_info);
if (profiling_ret != RT_ERROR_NONE) {
GELOGE(profiling_ret, "[Get][TaskDescInfo] of node:%s failed, when %s.", node->GetName().c_str(), __FUNCTION__);
GELOGE(profiling_ret, "[Get][TaskDescInfo] of node:%s failed.", node->GetName().c_str());
REPORT_CALL_ERROR("E19999", "GetTaskDescInfo of node:%s failed, when %s.", node->GetName().c_str(), __FUNCTION__);
return profiling_ret;
}
@@ -206,7 +206,7 @@ Status NodeDoneCallback::ProfilingReport() {
Status NodeDoneCallback::DumpDynamicNode() {
auto node = context_->GetNodeItem().node;
if (node == nullptr) {
GELOGE(PARAM_INVALID, "[Get][Node] value is nullptr when %s.", __FUNCTION__);
GELOGE(PARAM_INVALID, "[Get][Node] value is nullptr.");
REPORT_INNER_ERROR("E19999", "get node is nullptr when %s.", __FUNCTION__);
return PARAM_INVALID;
}
@@ -250,12 +250,12 @@ Status NodeDoneCallback::DumpDynamicNode() {
void *global_step = context_->GetExecutionContext()->global_step;
dump_op_.SetLoopAddr(global_step, loop_per_iter, loop_cond);

GE_CHK_STATUS_RET(dump_op_.LaunchDumpOp(), "[Launch][DumpOp] failed in hybird model when %s.", __FUNCTION__);
GE_CHK_STATUS_RET(dump_op_.LaunchDumpOp(), "[Launch][DumpOp] failed in hybird model.");

auto rt_ret = rtStreamSynchronize(stream);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(rt_ret, "[Call][rtStreamSynchronize] failed when %s.", __FUNCTION__);
REPORT_CALL_ERROR("E19999", "call rtStreamSynchronize failed when %s.", __FUNCTION__);
GELOGE(rt_ret, "[Call][rtStreamSynchronize] failed, ret = %d.", rt_ret);
REPORT_CALL_ERROR("E19999", "call rtStreamSynchronize failed when %s, ret = %d.", __FUNCTION__, rt_ret);
return rt_ret;
}
return SUCCESS;
@@ -270,12 +270,12 @@ Status NodeDoneCallback::OnNodeDone() {
const DumpProperties &dump_properties = context_->GetDumpProperties();
if (dump_properties.IsDumpOpen() || context_->IsOverFlow()) {
GELOGI("Start to dump dynamic shape op");
GE_CHK_STATUS_RET(DumpDynamicNode(), "[Call][DumpDynamicNode] Failed when %s.", __FUNCTION__);
GE_CHK_STATUS_RET(DumpDynamicNode(), "[Call][DumpDynamicNode] Failed.");
}

if (ProfilingManager::Instance().ProfilingModelExecuteOn()) {
GE_CHK_STATUS_RET(ProfilingReport(), "[Report][Profiling] of node[%s] failed when %s.",
node_item.NodeName().c_str(), __FUNCTION__);
GE_CHK_STATUS_RET(ProfilingReport(), "[Report][Profiling] of node[%s] failed.",
node_item.NodeName().c_str());
}

// release workspace
@@ -298,7 +298,7 @@ Status NodeDoneCallback::OnNodeDone() {
}

GE_CHK_STATUS_RET(context_->PropagateOutputs(),
"[Propagate][Outputs] of [%s] failed when %s.", node_item.NodeName().c_str(), __FUNCTION__);
"[Propagate][Outputs] of [%s] failed.", node_item.NodeName().c_str());

RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[PropagateOutputs] End");
}
@@ -338,8 +338,8 @@ Status ExecutionEngine::DoExecuteAsync(NodeState &node_state,
const std::function<void()> &callback) {
const auto &task = node_state.GetKernelTask();
if (task == nullptr) {
GELOGE(INTERNAL_ERROR, "[Get][KernelTask] of [%s] is null when %s.", node_state.GetName().c_str(), __FUNCTION__);
REPORT_CALL_ERROR("E19999", "GetKernelTask of %s is null when %s.", node_state.GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "[Get][KernelTask] of [%s] is null.", node_state.GetName().c_str());
REPORT_INNER_ERROR("E19999", "GetKernelTask of %s is null when %s.", node_state.GetName().c_str(), __FUNCTION__);
return INTERNAL_ERROR;
}

@@ -354,7 +354,7 @@ Status ExecutionEngine::DoExecuteAsync(NodeState &node_state,
GE_CHECK_NOTNULL(executor);
RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] Start");
GE_CHK_STATUS_RET(executor->PrepareTask(*task, task_context),
"[Prepare][Task] for [%s] failed when %s", node_state.GetName().c_str(), __FUNCTION__);
"[Prepare][Task] for [%s] failed.", node_state.GetName().c_str());
RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] End");
GELOGD("[%s] Done task preparation successfully.", node_state.GetName().c_str());

@@ -365,8 +365,8 @@ Status ExecutionEngine::DoExecuteAsync(NodeState &node_state,
}
}

GE_CHK_STATUS_RET(ValidateInputTensors(node_state, task_context), "[Validate][InputTensors] for %s failed when %s.",
node_state.GetName().c_str(), __FUNCTION__);
GE_CHK_STATUS_RET(ValidateInputTensors(node_state, task_context), "[Validate][InputTensors] for %s failed.",
node_state.GetName().c_str());
RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ValidateInputTensors] End");

if (context.profiling_level > 0) {
@@ -420,8 +420,8 @@ Status ExecutionEngine::ValidateInputTensors(const NodeState &node_state, const
input_tensor->GetSize());
} else {
GELOGE(INTERNAL_ERROR,
"[Check][Size] for [%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu when %s.",
task_context.GetNodeName(), i, expected_size, input_tensor->GetSize(), __FUNCTION__);
"[Check][Size] for [%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu.",
task_context.GetNodeName(), i, expected_size, input_tensor->GetSize());
REPORT_INNER_ERROR("E19999", "[%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu when %s.",
task_context.GetNodeName(), i, expected_size, input_tensor->GetSize(), __FUNCTION__);
return INTERNAL_ERROR;
@@ -437,7 +437,7 @@ Status ExecutionEngine::PropagateOutputs(const NodeItem &node_item,
GraphExecutionContext &context) {
if (node_item.shape_inference_type != DEPEND_COMPUTE) {
GE_CHK_STATUS_RET(task_context.PropagateOutputs(),
"[Propagate][Outputs] for [%s] failed when ExecutionEngine %s.", node_item.NodeName().c_str(), __FUNCTION__);
"[Propagate][Outputs] for [%s] failed.", node_item.NodeName().c_str());
RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PropagateOutputs] End");
GELOGD("[%s] Done propagating outputs successfully.", node_item.NodeName().c_str());
}


ge/hybrid/executor/worker/shape_inference_engine.cc (+26, -18)

@@ -70,7 +70,7 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) {
{
RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] Start");
GE_CHK_STATUS_RET(ShapeRefiner::InferShapeAndTypeForRunning(node_item.node, true),
"[Invoke][InferShapeAndType] for %s failed when %s.", node_item.NodeName().c_str(), __FUNCTION__);
"[Invoke][InferShapeAndType] for %s failed.", node_item.NodeName().c_str());
RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] End");
}

@@ -172,7 +172,7 @@ Status ShapeInferenceEngine::InferShapeForSubgraph(const NodeItem &node_item, co
GE_CHK_STATUS_RET(ShapeRefiner::InferShapeAndType(node));
GELOGD("[%s] Done invoking InferShapeAndType", node->GetName().c_str());
GE_CHK_STATUS_RET(UpdatePeerNodeShape(*node),
"[Update][PeerNodeShape] failed for [%s] when %s.", node->GetName().c_str(), __FUNCTION__);
"[Update][PeerNodeShape] failed for [%s].", node->GetName().c_str());
}

for (auto &it : fused_subgraph.output_mapping) {
@@ -204,8 +204,7 @@ Status ShapeInferenceEngine::UpdatePeerNodeShape(const Node &node) {
GE_CHECK_NOTNULL(peer_op_desc);
auto peer_input_desc = peer_op_desc->MutableInputDesc(peer_anchor->GetIdx());
if (peer_input_desc == nullptr) {
GELOGE(GRAPH_FAILED, "[Call][MutableInputDesc] for %s return nullptr when ShapeInferenceEngine %s.",
peer_op_desc->GetName().c_str(), __FUNCTION__);
GELOGE(GRAPH_FAILED, "[Call][MutableInputDesc] for %s return nullptr.", peer_op_desc->GetName().c_str());
REPORT_CALL_ERROR("E19999", "%s call MutableInputDesc return nullptr when ShapeInferenceEngine %s.",
peer_op_desc->GetName().c_str(), __FUNCTION__);
continue;
@@ -233,8 +232,8 @@ Status ShapeInferenceEngine::CanonicalizeShape(GeTensorDesc &tensor_desc,
if (tensor_shape.IsUnknownShape()) {
if (!fallback_with_range) {
GELOGE(INTERNAL_ERROR,
"[Is][UnknownShape] Output shape is still unknown after shape inference. "
"shape = [%s] when ShapeInferenceEngine %s.", tensor_shape.ToString().c_str(), __FUNCTION__);
"[Is][UnknownShape] Output shape is still unknown after shape inference. shape = [%s].",
tensor_shape.ToString().c_str());
REPORT_INNER_ERROR("E19999", "Output shape is still unknown after shape inference. "
"shape = [%s] when ShapeInferenceEngine %s.", tensor_shape.ToString().c_str(), __FUNCTION__);
return INTERNAL_ERROR;
@@ -244,8 +243,8 @@ Status ShapeInferenceEngine::CanonicalizeShape(GeTensorDesc &tensor_desc,
std::vector<std::pair<int64_t, int64_t>> shape_range;
GE_CHK_GRAPH_STATUS_RET(tensor_desc.GetShapeRange(shape_range), "Failed to get shape range");
if (shape_range.size() != shape.size()) {
GELOGE(INTERNAL_ERROR, "[Check][Size] Number of shape ranges (%zu) mismatches that of dims (%zu)"
" when ShapeInferenceEngine %s.", shape_range.size(), shape.size(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "[Check][Size] Number of shape ranges (%zu) mismatches that of dims (%zu).",
shape_range.size(), shape.size());
REPORT_INNER_ERROR("E19999", "Number of shape ranges (%zu) mismatches that of dims (%zu)"
" when ShapeInferenceEngine %s.", shape_range.size(), shape.size(), __FUNCTION__);
return INTERNAL_ERROR;
@@ -271,8 +270,8 @@ Status ShapeInferenceEngine::CalcTensorSize(DataType data_type,
GELOGD("To calc tensor size by shape = [%s]", GeShape(shape).ToString().c_str());
uint32_t type_size;
if (!TypeUtils::GetDataTypeLength(data_type, type_size)) {
GELOGE(INTERNAL_ERROR, "[Get][DataTypeLength] failed for type:%s when ShapeInferenceEngine %s.",
TypeUtils::DataTypeToSerialString(data_type).c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "[Get][DataTypeLength] failed for type:%s.",
TypeUtils::DataTypeToSerialString(data_type).c_str());
REPORT_CALL_ERROR("E19999", "GetDataTypeLength failed for type:%s when ShapeInferenceEngine %s.",
TypeUtils::DataTypeToSerialString(data_type).c_str(), __FUNCTION__);
return INTERNAL_ERROR;
@@ -287,7 +286,7 @@ Status ShapeInferenceEngine::CalcTensorSize(DataType data_type,
}

GE_CHK_STATUS_RET(CheckInt64AddOverflow(tensor_size, kAlignment - 1),
"[Check][Overflow]Tensor size is too large: %ld, shape = [%s]",
"[Check][Overflow]Tensor size is too large: %ld, shape = [%s] Shape size will overflow when add align.",
tensor_size, GeShape(shape).ToString().c_str());
tensor_size = (tensor_size + kAlignment - 1) / kAlignment * kAlignment;
return SUCCESS;
@@ -301,14 +300,23 @@ Status ShapeInferenceEngine::CalcOutputTensorSizes(const NodeItem &node_item, bo
const auto &shape = tensor_desc->MutableShape();
// modify on copy
auto dims = shape.GetDims();
GE_CHK_STATUS_RET(CanonicalizeShape(*tensor_desc, dims, fallback_with_range),
"[Canonicalize][Shape] failed for [%s], output %zu, when ShapeInferenceEngine %s.",
node_item.NodeName().c_str(), output_index, __FUNCTION__);

auto _status = CanonicalizeShape(*tensor_desc, dims, fallback_with_range);
if(_status != SUCCESS){
REPORT_CALL_ERROR("E19999", "Invoke CanonicalizeShape failed when ShapeInferenceEngine %s, node:%s, output:%zu.",
node_item.NodeName().c_str(), __FUNCTION__, output_index);
GELOGE(ge::FAILED, "[Canonicalize][Shape] failed for [%s], output %zu.",
node_item.NodeName().c_str(), output_index);
return _status;
}
int64_t tensor_size;
GE_CHK_STATUS_RET(CalcTensorSize(tensor_desc->GetDataType(), dims, tensor_size),
"[Calc][TensorSize] failed for [%s], output %zu when ShapeInferenceEngine %s.",
node_item.NodeName().c_str(), output_index, __FUNCTION__);
_status = CalcTensorSize(tensor_desc->GetDataType(), dims, tensor_size);
if(_status != SUCCESS){
REPORT_CALL_ERROR("E19999", "Invoke CalcTensorSize failed when ShapeInferenceEngine %s, node:%s, output:%zu.",
node_item.NodeName().c_str(), __FUNCTION__, output_index);
GELOGE(ge::FAILED, "[Calc][TensorSize] failed for [%s], output %zu.",
node_item.NodeName().c_str(), output_index);
return _status;
}
GELOGD("[%s] Tensor size of output %zu = %ld", node_item.NodeName().c_str(), output_index, tensor_size);
(void) TensorUtils::SetSize(*tensor_desc, tensor_size);
}


ge/hybrid/executor/worker/task_compile_engine.cc (+1, -2)

@@ -32,8 +32,7 @@ Status TaskCompileEngine::Compile(NodeState &node_state, GraphExecutionContext *
shared_ptr<NodeTask> kernel_task;
auto ret = node_item.node_executor->CompileTask(*context->model, node_item.node, kernel_task);
RECORD_COMPILE_EVENT(context, node_state.GetName().c_str(), "[Compile] End");
GE_CHK_STATUS_RET(ret, "[Compile][Task] failed for node: %s, when TaskCompileEngine %s.",
node_item.NodeName().c_str(), __FUNCTION__);
GE_CHK_STATUS_RET(ret, "[Compile][Task] failed for node: %s.", node_item.NodeName().c_str());
node_state.SetKernelTask(kernel_task);
GELOGI("Compiling node %s successfully", node_state.GetName().c_str());
return SUCCESS;

