|
|
@@ -441,8 +441,8 @@ Status AicpuTfNodeTask::EnsureSessionCreated(uint64_t session_id) {
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::ReadResultSummaryAndPrepareMemory(TaskContext &context,
-                                                          std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::ReadResultSummaryAndPrepareMemory(TaskContext &context,
+                                                            std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   for (auto i = 0; i < node_item_->num_outputs; ++i) {
     auto &result_summary = output_summary_host_[i];
     GE_CHK_RT_RET(rtMemcpy(&result_summary, sizeof(aicpu::FWKAdapter::ResultSummary),
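
Note: ReadResultSummaryAndPrepareMemory copies one aicpu::FWKAdapter::ResultSummary per output from device to host before sizing any further allocations. A minimal standalone sketch of that readback, with the summary layout assumed to mirror the FWKAdapter header (shape_data_ptr / shape_data_size / raw_data_ptr / raw_data_size; the real header is authoritative) and plain std::memcpy standing in for rtMemcpy:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Assumed mirror of aicpu::FWKAdapter::ResultSummary; illustrative only.
    struct ResultSummary {
      uint64_t shape_data_ptr;   // device address of the output's shape words
      uint64_t shape_data_size;  // shape data length in bytes
      uint64_t raw_data_ptr;     // device address of the output's payload
      uint64_t raw_data_size;    // payload length in bytes
    };

    // Host-side readback loop, one summary per output (output_summary_host_ in the patch).
    std::vector<ResultSummary> ReadSummaries(const std::vector<const void *> &device_summaries) {
      std::vector<ResultSummary> host(device_summaries.size());
      for (std::size_t i = 0; i < host.size(); ++i) {
        // The patch does this with rtMemcpy(..., RT_MEMCPY_DEVICE_TO_HOST).
        std::memcpy(&host[i], device_summaries[i], sizeof(ResultSummary));
      }
      return host;
    }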
|
|
@@ -467,6 +467,31 @@ Status AicpuTfNodeTask::ReadResultSummaryAndPrepareMemory(TaskContext &context,
   return SUCCESS;
 }
 
+Status AicpuNodeTask::CopyDataToHbm(TaskContext &context,
+                                    const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+  GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<std::size_t>(node_item_->num_outputs),
+                         INTERNAL_ERROR,
+                         "[Check][Size]Node[%s] has %d outputs but out shape is %zu not equal.",
+                         node_name_.c_str(), node_item_->num_outputs, out_shape_hbm.size());
+
+  GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(context, out_shape_hbm));
+
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] Start");
+  uint32_t flag = RT_KERNEL_DEFAULT;
+  auto rt_ret = rtCpuKernelLaunchWithFlag(reinterpret_cast<const void *>(memcpy_so_name_.c_str()),
+                                          reinterpret_cast<const void *>(memcpy_kernel_name_.c_str()),
+                                          1,  // default core dim is 1
+                                          memcpy_args_.get(), memcpy_args_size_,
+                                          nullptr, context.GetStream(), flag);
+  GE_CHK_RT_RET(rt_ret);
+
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] End");
+
+  GE_CHK_RT_RET(rtStreamSynchronize(context.GetStream()));
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[SynchronizeCopy] End");
+  return SUCCESS;
+}
+
 Status AicpuTfNodeTask::CopyDataToHbm(TaskContext &context,
                                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<std::size_t>(node_item_->num_outputs),
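
Note: the new AicpuNodeTask::CopyDataToHbm launches the MemCopy kernel and then blocks on rtStreamSynchronize, apparently so the copied output data and shapes are in place before the shape-update step reads them. The launch-then-wait shape of it, with std::function stubs standing in for the runtime calls (the stubs are illustrative, not the runtime API):

    #include <functional>

    // Stubs stand in for rtCpuKernelLaunchWithFlag / rtStreamSynchronize.
    int LaunchBlockingCopy(const std::function<int()> &launch_copy_kernel,
                           const std::function<int()> &synchronize_stream) {
      if (const int ret = launch_copy_kernel(); ret != 0) {
        return ret;  // launch failed, nothing to wait on
      }
      return synchronize_stream();  // block until the device-side copy completes
    }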
|
|
@@ -486,8 +511,8 @@ Status AicpuTfNodeTask::CopyDataToHbm(TaskContext &context,
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::PrepareCopyInputs(const TaskContext &context,
-                                          const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::PrepareCopyInputs(const TaskContext &context,
+                                            const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   std::vector<uint64_t> copy_input_release_flag;
   std::vector<uint64_t> copy_input_data_size;
   std::vector<uint64_t> copy_input_src;
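
Note: PrepareCopyInputs assembles four index-aligned uint64_t tables, one entry per output: a release flag, the byte count, the source address (taken from the output's ResultSummary), and the destination address, presumably the corresponding real output buffer. These four tables become the four inputs of the MemCopy AICPU kernel. A standalone sketch, reusing the ResultSummary mirror from the earlier note; the release-flag value is an assumption (GE uses a kReleaseFlag constant):

    #include <cstdint>
    #include <vector>

    struct CopyPlan {
      std::vector<uint64_t> release_flag;  // per-entry flag consumed by the MemCopy kernel
      std::vector<uint64_t> data_size;     // bytes to move
      std::vector<uint64_t> src;           // raw_data_ptr from each output's summary
      std::vector<uint64_t> dst;           // device address of the real output tensor
    };

    CopyPlan BuildCopyPlan(const std::vector<ResultSummary> &summaries,
                           const std::vector<uint64_t> &output_addrs) {
      CopyPlan plan;
      for (std::size_t i = 0; i < summaries.size(); ++i) {
        plan.release_flag.push_back(1);  // assumed kReleaseFlag value
        plan.data_size.push_back(summaries[i].raw_data_size);
        plan.src.push_back(summaries[i].raw_data_ptr);
        plan.dst.push_back(output_addrs[i]);
      }
      return plan;  // each vector is then rtMemcpy'd into its *_dev_ staging buffer
    }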
|
|
@@ -528,8 +553,8 @@ Status AicpuTfNodeTask::PrepareCopyInputs(const TaskContext &context,
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::UpdateShapeByHbmBuffer(TaskContext &context,
-                                               const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::UpdateShapeByHbmBuffer(TaskContext &context,
+                                                 const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<std::size_t>(node_item_->num_outputs),
                          INTERNAL_ERROR,
                          "Node[%s] has %d outputs but out shape is %zu",
|
|
@@ -560,7 +585,7 @@ Status AicpuTfNodeTask::UpdateShapeByHbmBuffer(TaskContext &context,
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::UpdateShapeAndDataByResultSummary(TaskContext &context) {
+Status AicpuNodeTaskBase::UpdateShapeAndDataByResultSummary(TaskContext &context) {
   GELOGD("Node[%s] update shape and data by result summary begin.", node_name_.c_str());
 
   std::vector<std::unique_ptr<TensorBuffer>> out_shape_hbm;
|
|
@@ -648,7 +673,7 @@ Status AicpuTfNodeTask::LaunchTask(TaskContext &context) {
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::TaskCallback(TaskContext &context) {
+Status AicpuNodeTaskBase::TaskCallback(TaskContext &context) {
   GELOGD("Node[%s] task callback start. is_dynamic=%s, unknown_type=%d.",
          node_name_.c_str(), node_item_->is_dynamic ? "true" : "false", unknown_type_);
   Status callback_ret = SUCCESS;
|
|
@@ -665,14 +690,122 @@ Status AicpuTfNodeTask::TaskCallback(TaskContext &context) {
   return callback_ret;
 }
 
+Status AicpuNodeTask::SetMemCopyTask(const domi::TaskDef &task_def) {
+  if (node_item_->num_outputs == 0) {
+    GELOGD("Node[%s] type[%s] has no output, no need set mem_copy task.",
+           node_name_.c_str(), node_item_->node_type.c_str());
+    return SUCCESS;
+  }
+
+  GELOGD("Start to set memcpy task for node[%s].", node_name_.c_str());
+  const domi::KernelDef &kernel_def = task_def.kernel();
+  auto &memcpy_args = kernel_def.args();
+  memcpy_args_size_ = kernel_def.args_size();
+  memcpy_so_name_ = kernel_def.so_name();
+  memcpy_kernel_name_ = kernel_def.kernel_name();
+  GE_IF_BOOL_EXEC(memcpy_args.size() != memcpy_args_size_,
+                  REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu, but args_size=%u not equal.",
+                                     memcpy_args.size(), memcpy_args_size_);
+                  GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu, but args_size=%u not equal.",
+                         memcpy_args.size(), memcpy_args_size_);
+                  return FAILED;);
+  GE_IF_BOOL_EXEC(memcpy_args_size_ < sizeof(aicpu::AicpuParamHead),
+                  REPORT_INNER_ERROR("E19999",
+                                     "Task def args_size=%u is less than aicpu param head len=%zu.",
+                                     memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+                  GELOGE(FAILED,
+                         "[Check][Size] Task def args_size=%u is less than aicpu param head len=%zu.",
+                         memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+                  return FAILED;);
+
+  memcpy_args_.reset(new(std::nothrow) uint8_t[memcpy_args_size_]());
+  GE_IF_BOOL_EXEC(memcpy_args_ == nullptr,
+                  REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%u].",
+                                     memcpy_args_size_);
+                  GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%u].",
+                         memcpy_args_size_);
+                  return FAILED;);
+
+  errno_t sec_ret = memcpy_s(memcpy_args_.get(), memcpy_args_size_, memcpy_args.c_str(), memcpy_args.size());
+  GE_IF_BOOL_EXEC(sec_ret != EOK,
+                  REPORT_INNER_ERROR("E19999",
+                                     "memcpy_s args failed for Node[MemCopy], ret: %d", sec_ret);
+                  GELOGE(INTERNAL_ERROR,
+                         "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
+                  return sec_ret;);
+  auto memcpy_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(memcpy_args_.get());
+  uint32_t memcpy_io_num = memcpy_param_head->ioAddrNum;
+  auto memcpy_io_addr = memcpy_args_.get() + sizeof(aicpu::AicpuParamHead);
+  // if has input and output, need copy to ioaddr
+  int cpy_ret = memcpy_s(memcpy_io_addr, memcpy_args_size_ - sizeof(aicpu::AicpuParamHead),
+                         &copy_ioaddr_dev_, sizeof(uint64_t) * memcpy_io_num);
+  GE_IF_BOOL_EXEC(cpy_ret != 0,
+                  REPORT_INNER_ERROR("E19999", "Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+                                     "ret=%d, args_size=%u, io nums=%u.",
+                                     cpy_ret, memcpy_args_size_, memcpy_io_num);
+                  GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+                         "ret=%d, args_size=%u, io nums=%u.",
+                         cpy_ret, memcpy_args_size_, memcpy_io_num);
+                  return INTERNAL_ERROR;);
+  GELOGD("Set memcpy task for node[MemCopy] successfully.");
+  return SUCCESS;
+}
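
Note: SetMemCopyTask clones the serialized kernel args from the MemCopy task def, then patches the io-address table that sits immediately behind the aicpu::AicpuParamHead; the address written there is the device buffer holding the four copy-input table pointers. A standalone sketch with an assumed two-field head layout (the real AicpuParamHead has more fields) and ioAddrNum taken to be 1:

    #include <cstdint>
    #include <cstring>
    #include <memory>

    // Assumed stand-in for aicpu::AicpuParamHead; illustrative only.
    struct ParamHead {
      uint32_t length;     // total args length in bytes
      uint32_t ioAddrNum;  // count of uint64_t io addresses behind the head
    };

    // Clone the task-def args, then write the device address of the copy-input
    // table into the first io slot behind the head. Caller has already validated
    // args_size >= sizeof(ParamHead) + sizeof(uint64_t), as the patch does.
    std::unique_ptr<uint8_t[]> BuildMemcpyArgs(const void *def_args, uint32_t args_size,
                                               uint64_t copy_table_dev_addr) {
      std::unique_ptr<uint8_t[]> args(new uint8_t[args_size]());
      std::memcpy(args.get(), def_args, args_size);
      uint8_t *io_slot = args.get() + sizeof(ParamHead);
      std::memcpy(io_slot, &copy_table_dev_addr, sizeof(uint64_t));  // sketch assumes ioAddrNum == 1
      return args;
    }
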
+Status AicpuNodeTask::InitForDependComputeTask() {
+  if ((unknown_type_ != DEPEND_COMPUTE) || (node_item_->num_outputs == 0)) {
+    GELOGD("Node[%s] type[%s] unknown_type is %d, output num is %d.",
+           node_name_.c_str(), node_item_->node_type.c_str(), unknown_type_, node_item_->num_outputs);
+    return SUCCESS;
+  }
+
+  output_summary_.resize(node_item_->num_outputs);
+  constexpr auto result_summary_size = sizeof(aicpu::FWKAdapter::ResultSummary);
+  for (auto i = 0; i < node_item_->num_outputs; ++i) {
+    GE_CHK_STATUS_RET(AllocTensorBuffer(result_summary_size, output_summary_[i]),
+                      "[Alloc][TensorBuffer] failed for Node[%s] to copy result summary info, size=%zu.",
+                      node_name_.c_str(), result_summary_size);
+  }
+  output_summary_host_.resize(node_item_->num_outputs);
+
+  // init for mem copy task
+  // copy task need copy output_data and output_shape, max len is 2 * output_num
+  const size_t copy_input_buf_len = node_item_->num_outputs * 2 * sizeof(uint64_t);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_release_flag_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input release_flag, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_data_size_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input data_size, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_src_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input src, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_dst_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input dst, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+
+  std::vector<uint64_t> copy_io_addr;
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_release_flag_dev_->GetData()));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_data_size_dev_->GetData()));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_src_dev_->GetData()));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_dst_dev_->GetData()));
+
+  // mem copy op has 4 inputs and 0 output.
+  const auto copy_io_addr_size = sizeof(uint64_t) * copy_io_addr.size();
+
+  // can alloc in init, it can reuse
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_io_addr_size, copy_ioaddr_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task ioaddr, size=%zu",
+                    node_name_.c_str(), copy_io_addr_size);
+
+  GE_CHK_RT_RET(rtMemcpy(copy_ioaddr_dev_->GetData(), copy_io_addr_size,
+                         &copy_io_addr[0], copy_io_addr_size, RT_MEMCPY_HOST_TO_DEVICE));
+  return SUCCESS;
+}
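
Note: every copy-input staging buffer above is sized as num_outputs * 2 * sizeof(uint64_t) because, per the comment in the patch, the copy task may move both the payload and the shape words of each output, so each table needs room for up to two 8-byte entries per output. The arithmetic, as a compile-time check:

    #include <cstddef>
    #include <cstdint>

    // Max table length for N outputs: one payload entry plus one shape entry each.
    constexpr std::size_t CopyInputBufLen(int num_outputs) {
      return static_cast<std::size_t>(num_outputs) * 2 * sizeof(uint64_t);
    }
    static_assert(CopyInputBufLen(3) == 48, "3 outputs -> 6 slots of 8 bytes");
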
 Status AicpuNodeTask::Init(const HybridModel &model) {
   auto node_name = node_name_;
   GELOGD("Node[%s] init start.", node_name.c_str());
 
-  GE_CHK_BOOL_RET_STATUS(unknown_type_ != DEPEND_COMPUTE, FAILED,
-                         "[Check][Type]Node[%s] unknown type[%d] is depend compute, it's not supported now.",
-                         node_name.c_str(), unknown_type_);
-
   GE_CHK_BOOL_RET_STATUS(task_def_.has_kernel(), FAILED,
                          "[Check][task_def_]Node[%s] task def does not has kernel.", node_name.c_str());
   auto &kernel_def = task_def_.kernel();
|
|
@@ -761,7 +894,10 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
   uint64_t ext_session_id = model.GetSessionId();
   GE_CHK_STATUS_RET(InitExtInfo(kernel_ext_info, ext_session_id),
                     "[Init][ExtInfo] failed for Node[%s].", node_name.c_str());
+  GE_CHK_STATUS_RET(InitForDependComputeTask(),
+                    "[Init][DependComputeTask] failed for Node[%s].",
+                    node_name_.c_str());
   if (ext_info_addr_dev_ == nullptr) {
     aicpu_param_head->extInfoLength = 0;
     aicpu_param_head->extInfoAddr = 0;
|
|
@@ -769,7 +905,11 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
     aicpu_param_head->extInfoLength = ext_info_addr_dev_->GetSize();
     aicpu_param_head->extInfoAddr = reinterpret_cast<uintptr_t>(ext_info_addr_dev_->GetData());
   }
+
+  auto task_defs = model.GetTaskDefs(node_item_->node);
+  GE_CHECK_NOTNULL(task_defs);
+  if (unknown_type_ == DEPEND_COMPUTE) {
+    GE_CHK_STATUS_RET_NOLOG(SetMemCopyTask((*task_defs)[1]));
+  }
   GELOGD("Node[%s] init end.", node_name.c_str());
   return SUCCESS;
 }
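
Note: for DEPEND_COMPUTE nodes, Init now expects the node to carry at least two task defs, with (*task_defs)[1] being the MemCopy kernel handed to SetMemCopyTask (index 0 is presumably the compute kernel itself). The patch indexes [1] without a size check; a defensive variant, with a stub standing in for domi::TaskDef, might look like:

    #include <vector>

    struct TaskDef {};  // stand-in for domi::TaskDef

    // Hypothetical guard; the patch relies on the model always emitting both defs.
    const TaskDef *SelectMemCopyDef(const std::vector<TaskDef> &defs) {
      return defs.size() > 1 ? &defs[1] : nullptr;  // [0] compute kernel, [1] MemCopy kernel
    }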
|
|
@@ -785,13 +925,29 @@ Status AicpuNodeTask::UpdateIoAddr(TaskContext &context) {
     io_addrs.emplace_back(reinterpret_cast<uintptr_t>(inputData->GetData()));
   }
 
-  GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs());
-  for (auto j = 0; j < node_item_->num_outputs; ++j) {
-    auto outputData = context.GetOutput(j);
-    GE_CHECK_NOTNULL(outputData);
-    GELOGD("Node[%s] output[%d] addr = %p, size = %zu", node_name_.c_str(), j,
-           outputData->GetData(), outputData->GetSize());
-    io_addrs.emplace_back(reinterpret_cast<uintptr_t>(outputData->GetData()));
+  // known shape or not depend compute
+  if (!node_item_->is_dynamic || unknown_type_ != DEPEND_COMPUTE) {
+    // unknown type 4 do this in call back.
+    GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs());
+    for (auto j = 0; j < node_item_->num_outputs; ++j) {
+      auto outputData = context.GetOutput(j);
+      GE_CHECK_NOTNULL(outputData);
+      GELOGD("Node[%s] output[%d] addr = %p, size = %zu",
+             node_name_.c_str(), j, outputData->GetData(), outputData->GetSize());
+      io_addrs.emplace_back(reinterpret_cast<uintptr_t>(outputData->GetData()));
+    }
+  } else {
+    // unknown type 4 use result summary update ioaddr.
+    GELOGD("Node[%s] is depend compute node, use result summary as out addr.", node_name_.c_str());
+    GE_CHK_BOOL_RET_STATUS(output_summary_.size() == static_cast<std::size_t>(node_item_->num_outputs),
+                           INTERNAL_ERROR,
+                           "[Check][Size]Node[%s] has %d output but %zu output summary not equal.",
+                           node_name_.c_str(), node_item_->num_outputs, output_summary_.size());
+
+    for (auto j = 0; j < node_item_->num_outputs; ++j) {
+      void *summary_addr = output_summary_[j]->GetData();
+      io_addrs.emplace_back(reinterpret_cast<uintptr_t>(summary_addr));
+    }
   }
 
   auto io_addr = args_.get() + sizeof(aicpu::AicpuParamHead);
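
Note: UpdateIoAddr now publishes two kinds of output addresses. For known-shape nodes and every unknown type other than DEPEND_COMPUTE it allocates the outputs up front and passes their real device addresses; for DEPEND_COMPUTE (unknown type 4) it passes the ResultSummary buffers instead, and the real outputs are materialized later in the callback from those summaries. The selection, reduced to a standalone sketch:

    #include <cstdint>
    #include <vector>

    // Addresses the AICPU kernel should treat as its outputs.
    std::vector<uint64_t> SelectOutputAddrs(bool dynamic_depend_compute,
                                            const std::vector<uint64_t> &real_output_addrs,
                                            const std::vector<uint64_t> &summary_buf_addrs) {
      // DEPEND_COMPUTE: the kernel writes ResultSummary records, not tensors,
      // so the summary buffers stand in for the outputs at launch time.
      return dynamic_depend_compute ? summary_buf_addrs : real_output_addrs;
    }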
|
|
@@ -829,23 +985,6 @@ Status AicpuNodeTask::LaunchTask(TaskContext &context) {
   return SUCCESS;
 }
 
-Status AicpuNodeTask::TaskCallback(TaskContext &context) {
-  GELOGD("Node[%s] task callback start, is_dynamic = %s, unknown_type=%d.",
-         node_name_.c_str(), node_item_->is_dynamic ? "true" : "false", unknown_type_);
-  Status callback_ret = SUCCESS;
-
-  // check need update shape, call update shape.
-  if (node_item_->is_dynamic && unknown_type_ == DEPEND_SHAPE_RANGE) {
-    // check result
-    callback_ret = UpdateOutputShapeFromExtInfo(context);
-  } else {
-    GELOGD("Node[%s] unknown shape type is %d no need update output shape.",
-           node_name_.c_str(), unknown_type_);
-  }
-  GELOGD("Node[%s] task callback end.", node_name_.c_str());
-  return callback_ret;
-}
-
 Status AiCpuNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const {
   // malloc HBM memory at Init, here just update them
   RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCpuNodeExecutorPrepareTask] Start");
|
|
|