diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
index 09931585..71a8efbb 100755
--- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
+++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
@@ -759,6 +759,62 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
   return SUCCESS;
 }
 
+Status AicpuNodeTask::InitForDependComputeTask() {
+  if ((unknown_type_ != DEPEND_COMPUTE) || (node_item_->num_outputs == 0)) {
+    GELOGD("Node[%s] type[%s] unknown_type is %d, output num is %d.",
+           node_name_.c_str(), node_item_->node_type.c_str(), unknown_type_, node_item_->num_outputs);
+    return SUCCESS;
+  }
+
+  output_summary_.resize(node_item_->num_outputs);
+  constexpr auto result_summary_size = sizeof(aicpu::FWKAdapter::ResultSummary);
+  for (auto i = 0; i < node_item_->num_outputs; ++i) {
+    GE_CHK_STATUS_RET(AllocTensorBuffer(result_summary_size, output_summary_[i]),
+                      "[Alloc][TensorBuffer] failed for Node[%s] to copy result summary info, size=%zu.",
+                      node_name_.c_str(), result_summary_size);
+  }
+  output_summary_host_.resize(node_item_->num_outputs);
+
+  // init for the memcopy task
+  // the copy task needs to copy both output_data and output_shape, so the max len is 2 * output_num
+  const size_t copy_input_buf_len = node_item_->num_outputs * 2 * sizeof(uint64_t);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_release_flag_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input release_flag, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_data_size_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input data_size, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_src_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input src, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_dst_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input dst, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+
+  // copy task args buf
+  GE_CHK_STATUS_RET(AllocTensorBuffer(sizeof(STR_FWK_OP_KERNEL), copy_task_args_buf_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task args, size=%zu",
+                    node_name_.c_str(), sizeof(STR_FWK_OP_KERNEL));
+
+  std::vector<uint64_t> copy_io_addr;
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_release_flag_dev_->GetData()));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_data_size_dev_->GetData()));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_src_dev_->GetData()));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_dst_dev_->GetData()));
+
+  // the memcopy op has 4 inputs and 0 outputs.
+  const auto copy_io_addr_size = sizeof(uint64_t) * copy_io_addr.size();
+
+  // allocated once in init so it can be reused across executions
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_io_addr_size, copy_ioaddr_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task ioaddr, size=%zu",
+                    node_name_.c_str(), copy_io_addr_size);
+
+  GE_CHK_RT_RET(rtMemcpy(copy_ioaddr_dev_->GetData(), copy_io_addr_size,
+                         &copy_io_addr[0], copy_io_addr_size, RT_MEMCPY_HOST_TO_DEVICE));
+  return SUCCESS;
+}
+
 Status AicpuNodeTask::UpdateIoAddr(TaskContext &context) {
   vector<uint64_t> io_addrs;
   io_addrs.reserve(node_item_->num_inputs + node_item_->num_outputs);