diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
index 71a8efbb..455aedc5 100755
--- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
+++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
@@ -754,12 +754,59 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
     aicpu_param_head->extInfoLength = ext_info_addr_dev_->GetSize();
     aicpu_param_head->extInfoAddr = reinterpret_cast<uintptr_t>(ext_info_addr_dev_->GetData());
   }
-
+  auto task_defs = model.GetTaskDefs(node_item_->node);
+  GE_CHECK_NOTNULL(task_defs);
+  if (unknown_type_ == DEPEND_COMPUTE) {
+    GE_CHK_STATUS_RET_NOLOG(SetMemCopyTask((*task_defs)[1]));
+  }
   GELOGD("Node[%s] init end.", node_name.c_str());
   return SUCCESS;
 }
 
-Status AicpuNodeTask::InitForDependComputeTask() override {
+Status AicpuNodeTask::SetMemCopyTask(const domi::TaskDef &task_def) {
+  if (node_item_->num_outputs == 0) {
+    GELOGD("Node[%s] type[%s] has no output, no need set mem_copy task.",
+           node_name_.c_str(), node_item_->node_type.c_str());
+    return SUCCESS;
+  }
+
+  GELOGD("Start to set memcpy task for node[%s].", node_name_.c_str());
+  const domi::KernelExDef &kernel_def = task_def.kernel_ex();
+  if (kernel_def.args_size() > sizeof(STR_FWK_OP_KERNEL)) {
+    GELOGE(PARAM_INVALID, "[Check][Size]sizeof STR_FWK_OP_KERNEL is:%lu, but args_size:%d is bigger",
+           sizeof(STR_FWK_OP_KERNEL), kernel_def.args_size());
+    REPORT_INNER_ERROR("E19999", "sizeof STR_FWK_OP_KERNEL is:%lu, but args_size:%d is bigger.",
+                       sizeof(STR_FWK_OP_KERNEL), kernel_def.args_size());
+    return PARAM_INVALID;
+  }
+  STR_FWK_OP_KERNEL aicpu_task = {0};
+  auto sec_ret = memcpy_s(&aicpu_task, sizeof(STR_FWK_OP_KERNEL),
+                          kernel_def.args().data(), kernel_def.args_size());
+  if (sec_ret != EOK) {
+    GELOGE(FAILED, "[Update][aicpu_task] failed, ret: %d", sec_ret);
+    REPORT_CALL_ERROR("E19999", "update aicpu_task failed, ret: %d.", sec_ret);
+    return FAILED;
+  }
+
+  GE_CHK_STATUS_RET(AllocTensorBuffer(kernel_def.task_info_size(), copy_workspace_buf_),
+                    "[Alloc][TensorBuffer] for Node[%s] to copy task workspace buf, size=%u.",
+                    node_name_.c_str(), kernel_def.task_info_size());
+
+  GE_CHK_RT_RET(rtMemcpy(copy_workspace_buf_->GetData(), kernel_def.task_info_size(),
+                         kernel_def.task_info().data(), kernel_def.task_info_size(), RT_MEMCPY_HOST_TO_DEVICE));
+
+  aicpu_task.fwkKernelBase.fwk_kernel.inputOutputAddr = reinterpret_cast<uintptr_t>(copy_ioaddr_dev_->GetData());
+  aicpu_task.fwkKernelBase.fwk_kernel.workspaceBaseAddr = reinterpret_cast<uintptr_t>(copy_workspace_buf_->GetData());
+  aicpu_task.fwkKernelBase.fwk_kernel.extInfoAddr = 0;
+  aicpu_task.fwkKernelBase.fwk_kernel.extInfoLen = 0;
+
+  GE_CHK_RT_RET(rtMemcpy(copy_task_args_buf_->GetData(), sizeof(STR_FWK_OP_KERNEL),
+                         &aicpu_task, sizeof(STR_FWK_OP_KERNEL), RT_MEMCPY_HOST_TO_DEVICE));
+  GELOGD("Set memcpy task for node[%s] successfully.", node_name_.c_str());
+  return SUCCESS;
+}
+
+Status AicpuNodeTask::InitForDependComputeTask() {
   if ((unknown_type_ != DEPEND_COMPUTE) || (node_item_->num_outputs == 0)) {
     GELOGD("Node[%s] type[%s] unknown_type is %d, output num is %d.",
            node_name_.c_str(), node_item_->node_type.c_str(), unknown_type_, node_item_->num_outputs);
@@ -899,6 +946,31 @@ Status AiCpuNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) cons
   return status;
 }
 
+Status AicpuNodeTask::UpdateShapeAndDataByResultSummary(TaskContext &context) {
+  GELOGD("Node[%s] update shape and data by result summary begin.", node_name_.c_str());
+
+  std::vector<std::unique_ptr<TensorBuffer>> out_shape_hbm;
+  GE_CHK_STATUS_RET(ReadResultSummaryAndPrepareMemory(context, out_shape_hbm),
+                    "[Invoke][ReadResultSummaryAndPrepareMemory] failed for Node[%s].",
+                    node_name_.c_str());
+
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(),
+                        "[ReadResultSummaryAndPrepareMemory] End");
+
+  GE_CHK_STATUS_RET(CopyDataToHbm(context, out_shape_hbm),
+                    "[Invoke][CopyDataToHbm] failed for Node[%s] copy data to output.",
+                    node_name_.c_str());
+
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[CopyDataToHbm] End");
+
+  GE_CHK_STATUS_RET(UpdateShapeByHbmBuffer(context, out_shape_hbm),
+                    "[Update][ShapeByHbmBuffer] failed for Node[%s].",
+                    node_name_.c_str());
+
+  GELOGD("Node[%s] update shape and data by result summary end.", node_name_.c_str());
+  return SUCCESS;
+}
+
 Status AiCpuNodeExecutor::LoadTask(const HybridModel &model,
                                    const NodePtr &node,
                                    std::shared_ptr<NodeTask> &task) const {
diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h
index 063659a3..b2f2c7a8 100644
--- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h
+++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h
@@ -99,8 +99,6 @@ class AicpuTfNodeTask : public AicpuNodeTaskBase {
   Status LaunchTask(TaskContext &context) override;
 
-  Status TaskCallback(TaskContext &context) override;
-
   Status UpdateIoAddr(TaskContext &context) override;
 
   Status UpdateShapeAndDataByResultSummary(TaskContext &context) override;
 
@@ -167,15 +165,13 @@ class AicpuNodeTask : public AicpuNodeTaskBase {
   Status LaunchTask(TaskContext &context) override;
 
-  Status TaskCallback(TaskContext &context) override;
-
   Status UpdateIoAddr(TaskContext &context) override;
 
   Status UpdateShapeAndDataByResultSummary(TaskContext &context) override;
 
   Status InitForDependComputeTask() override;
 
-  Status SetMemCopyTask(const domi::TaskDef &task_def);
+  Status SetMemCopyTask(const domi::TaskDef &task_def) override;
 
 protected:
   // host mem
   std::unique_ptr<TensorBuffer> args_;