
Pre Merge pull request !1879 from guopeian/graph-four

pull/1879/MERGE
guopeian Gitee 4 years ago
parent commit 6a3a3eb987
7 changed files with 440 additions and 137 deletions
  1. +178 -37   ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
  2. +52 -36    ge/hybrid/node_executor/aicpu/aicpu_node_executor.h
  3. +53 -21    ge/single_op/single_op_model.cc
  4. +1 -1      ge/single_op/single_op_model.h
  5. +2 -5      ge/single_op/task/aicpu_kernel_task_builder.cc
  6. +122 -15   ge/single_op/task/op_task.cc
  7. +32 -22    ge/single_op/task/op_task.h

ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc  (+178 -37)

@@ -261,7 +261,6 @@ Status AicpuTfNodeTask::InitForDependComputeTask() {
   GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_dst_dev_),
                     "[Alloc][TensorBuffer] failed for Node[%s] to copy task input dst, size=%zu",
                     node_name_.c_str(), copy_input_buf_len);
-
   // copy task args buf
   GE_CHK_STATUS_RET(AllocTensorBuffer(sizeof(STR_FWK_OP_KERNEL), copy_task_args_buf_),
                     "[Alloc][TensorBuffer] failed for Node[%s] to copy task args, size=%zu",
@@ -441,8 +440,8 @@ Status AicpuTfNodeTask::EnsureSessionCreated(uint64_t session_id) {
   return SUCCESS;
 }

-Status AicpuTfNodeTask::ReadResultSummaryAndPrepareMemory(TaskContext &context,
-                                                          std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::ReadResultSummaryAndPrepareMemory(TaskContext &context,
+                                                            std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   for (auto i = 0; i < node_item_->num_outputs; ++i) {
     auto &result_summary = output_summary_host_[i];
     GE_CHK_RT_RET(rtMemcpy(&result_summary, sizeof(aicpu::FWKAdapter::ResultSummary),
@@ -479,6 +478,32 @@ Status AicpuTfNodeTask::CopyDataToHbm(TaskContext &context,
   RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] Start");
   GE_CHK_RT_RET(rtKernelLaunchEx(copy_task_args_buf_->GetData(), sizeof(STR_FWK_OP_KERNEL),
                                  RT_KERNEL_DEFAULT, context.GetStream()));
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] End");
+
+  GE_CHK_RT_RET(rtStreamSynchronize(context.GetStream()));
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[SynchronizeCopy] End");
+  return SUCCESS;
+}
+
+Status AicpuNodeTask::CopyDataToHbm(TaskContext &context,
+                                    const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+  GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<std::size_t>(node_item_->num_outputs),
+                         INTERNAL_ERROR,
+                         "[Check][Size]Node[%s] has %d outputs but out shape is %zu not equal.",
+                         node_name_.c_str(), node_item_->num_outputs, out_shape_hbm.size());
+
+  GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(context, out_shape_hbm));
+
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] Start");
+  uint32_t flag = RT_KERNEL_DEFAULT;
+  auto rt_ret = rtCpuKernelLaunchWithFlag(reinterpret_cast<const void *>(memcpy_so_name_.c_str()),
+                                          reinterpret_cast<const void *>(memcpy_kernel_name_.c_str()),
+                                          1,  // default core dim is 1
+                                          memcpy_args_.get(), memcpy_args_size_,
+                                          nullptr, context.GetStream(), flag);
+  GE_CHK_RT_RET(rt_ret);
+
   RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] End");

   GE_CHK_RT_RET(rtStreamSynchronize(context.GetStream()));
@@ -486,8 +511,8 @@ Status AicpuTfNodeTask::CopyDataToHbm(TaskContext &context,
   return SUCCESS;
 }

-Status AicpuTfNodeTask::PrepareCopyInputs(const TaskContext &context,
-                                          const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::PrepareCopyInputs(const TaskContext &context,
+                                            const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   std::vector<uint64_t> copy_input_release_flag;
   std::vector<uint64_t> copy_input_data_size;
   std::vector<uint64_t> copy_input_src;
@@ -528,8 +553,8 @@ Status AicpuTfNodeTask::PrepareCopyInputs(const TaskContext &context,
   return SUCCESS;
 }

-Status AicpuTfNodeTask::UpdateShapeByHbmBuffer(TaskContext &context,
-                                               const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::UpdateShapeByHbmBuffer(TaskContext &context,
+                                                 const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<std::size_t>(node_item_->num_outputs),
                          INTERNAL_ERROR,
                          "Node[%s] has %d outputs but out shape is %zu",
@@ -560,7 +585,7 @@ Status AicpuTfNodeTask::UpdateShapeByHbmBuffer(TaskContext &context,
   return SUCCESS;
 }

-Status AicpuTfNodeTask::UpdateShapeAndDataByResultSummary(TaskContext &context) {
+Status AicpuNodeTaskBase::UpdateShapeAndDataByResultSummary(TaskContext &context) {
   GELOGD("Node[%s] update shape and data by result summary begin.", node_name_.c_str());

   std::vector<std::unique_ptr<TensorBuffer>> out_shape_hbm;
@@ -648,7 +673,7 @@ Status AicpuTfNodeTask::LaunchTask(TaskContext &context) {
   return SUCCESS;
 }

-Status AicpuTfNodeTask::TaskCallback(TaskContext &context) {
+Status AicpuNodeTaskBase::TaskCallback(TaskContext &context) {
   GELOGD("Node[%s] task callback start. is_dynamic=%s, unknown_type=%d.",
          node_name_.c_str(), node_item_->is_dynamic ? "true" : "false", unknown_type_);
   Status callback_ret = SUCCESS;
@@ -665,14 +690,122 @@ Status AicpuTfNodeTask::TaskCallback(TaskContext &context) {
   return callback_ret;
 }

+Status AicpuNodeTask::SetMemCopyTask(const domi::TaskDef &task_def) {
+  if (node_item_->num_outputs == 0) {
+    GELOGD("Node[%s] type[%s] has no output, no need set mem_copy task.",
+           node_name_.c_str(), node_item_->node_type.c_str());
+    return SUCCESS;
+  }
+
+  GELOGD("Start to set memcpy task for node[%s].", node_name_.c_str());
+  const domi::KernelDef &kernel_def = task_def.kernel();
+  auto &memcpy_args = kernel_def.args();
+  memcpy_args_size_ = kernel_def.args_size();
+  memcpy_so_name_ = kernel_def.so_name();
+  memcpy_kernel_name_ = kernel_def.kernel_name();
+  GE_IF_BOOL_EXEC(memcpy_args.size() != memcpy_args_size_,
+                  REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu, but args_size=%u not equal.",
+                                     memcpy_args.size(), memcpy_args_size_);
+                  GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu, but args_size=%u not equal.",
+                         memcpy_args.size(), memcpy_args_size_);
+                  return FAILED;);
+  GE_IF_BOOL_EXEC(memcpy_args_size_ < sizeof(aicpu::AicpuParamHead),
+                  REPORT_INNER_ERROR("E19999",
+                                     "Task def args_size=%u is less than aicpu param head len=%zu.",
+                                     memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+                  GELOGE(FAILED,
+                         "[Check][Size] Task def args_size=%u is less than aicpu param head len=%zu.",
+                         memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+                  return FAILED;);
+
+  memcpy_args_.reset(new(std::nothrow) uint8_t[memcpy_args_size_]());
+  GE_IF_BOOL_EXEC(memcpy_args_ == nullptr,
+                  REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%u].",
+                                     memcpy_args_size_);
+                  GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%u].",
+                         memcpy_args_size_);
+                  return FAILED;);
+
+  errno_t sec_ret = memcpy_s(memcpy_args_.get(), memcpy_args_size_, memcpy_args.c_str(), memcpy_args.size());
+  GE_IF_BOOL_EXEC(sec_ret != EOK,
+                  REPORT_INNER_ERROR("E19999",
+                                     "memcpy_s args_ failed for Node[MemCopy], ret: %d", sec_ret);
+                  GELOGE(INTERNAL_ERROR,
+                         "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
+                  return sec_ret;);
+  auto memcpy_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(memcpy_args_.get());
+  uint32_t memcpy_io_num = memcpy_param_head->ioAddrNum;
+  auto memcpy_io_addr = memcpy_args_.get() + sizeof(aicpu::AicpuParamHead);
+  // if has input and output, need copy to ioaddr
+  int cpy_ret = memcpy_s(memcpy_io_addr, memcpy_args_size_ - sizeof(aicpu::AicpuParamHead),
+                         &copy_ioaddr_dev_, sizeof(uint64_t) * memcpy_io_num);
+  GE_IF_BOOL_EXEC(cpy_ret != 0,
+                  REPORT_INNER_ERROR("E19999", "Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+                                     "ret=%d, args_size=%u, io nums=%u.",
+                                     cpy_ret, memcpy_args_size_, memcpy_io_num);
+                  GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+                         "ret=%d, args_size=%u, io nums=%u.",
+                         cpy_ret, memcpy_args_size_, memcpy_io_num);
+                  return INTERNAL_ERROR;);
+  GELOGD("Set memcpy task for node[MemCopy] successfully.");
+  return SUCCESS;
+}
+
+Status AicpuNodeTask::InitForDependComputeTask() {
+  if ((unknown_type_ != DEPEND_COMPUTE) || (node_item_->num_outputs == 0)) {
+    GELOGD("Node[%s] type[%s] unknown_type is %d, output num is %d.",
+           node_name_.c_str(), node_item_->node_type.c_str(), unknown_type_, node_item_->num_outputs);
+    return SUCCESS;
+  }
+
+  output_summary_.resize(node_item_->num_outputs);
+  constexpr auto result_summary_size = sizeof(aicpu::FWKAdapter::ResultSummary);
+  for (auto i = 0; i < node_item_->num_outputs; ++i) {
+    GE_CHK_STATUS_RET(AllocTensorBuffer(result_summary_size, output_summary_[i]),
+                      "[Alloc][TensorBuffer] failed for Node[%s] to copy result summary info, size=%zu.",
+                      node_name_.c_str(), result_summary_size);
+  }
+  output_summary_host_.resize(node_item_->num_outputs);
+
+  // init for mem copy task
+  // copy task need copy output_data and output_shape, max len is 2 * output_num
+  const size_t copy_input_buf_len = node_item_->num_outputs * 2 * sizeof(uint64_t);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_release_flag_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input release_flag, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_data_size_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input data_size, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_src_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input src, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_dst_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input dst, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+
+  std::vector<uint64_t> copy_io_addr;
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_release_flag_dev_->GetData()));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_data_size_dev_->GetData()));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_src_dev_->GetData()));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_dst_dev_->GetData()));
+
+  // mem copy op has 4 inputs and 0 output.
+  const auto copy_io_addr_size = sizeof(uint64_t) * copy_io_addr.size();
+
+  // can alloc in init, it can reuse
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_io_addr_size, copy_ioaddr_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task ioaddr, size=%zu",
+                    node_name_.c_str(), copy_io_addr_size);
+
+  GE_CHK_RT_RET(rtMemcpy(copy_ioaddr_dev_->GetData(), copy_io_addr_size,
+                         &copy_io_addr[0], copy_io_addr_size, RT_MEMCPY_HOST_TO_DEVICE));
+  return SUCCESS;
+}
+
 Status AicpuNodeTask::Init(const HybridModel &model) {
   auto node_name = node_name_;
   GELOGD("Node[%s] init start.", node_name.c_str());

-  GE_CHK_BOOL_RET_STATUS(unknown_type_ != DEPEND_COMPUTE, FAILED,
-                         "[Check][Type]Node[%s] unknown type[%d] is depend compute, it's not supported now.",
-                         node_name.c_str(), unknown_type_);
-
   GE_CHK_BOOL_RET_STATUS(task_def_.has_kernel(), FAILED,
                          "[Check][task_def_]Node[%s] task def does not has kernel.", node_name.c_str());
   auto &kernel_def = task_def_.kernel();
@@ -762,6 +895,10 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
   GE_CHK_STATUS_RET(InitExtInfo(kernel_ext_info, ext_session_id),
                     "[Init][ExtInfo] failed for Node[%s].", node_name.c_str());

+  GE_CHK_STATUS_RET(InitForDependComputeTask(),
+                    "[Init][DependComputeTask] failed for Node[%s].",
+                    node_name_.c_str());
+
   if (ext_info_addr_dev_ == nullptr) {
     aicpu_param_head->extInfoLength = 0;
     aicpu_param_head->extInfoAddr = 0;
@@ -770,6 +907,11 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
     aicpu_param_head->extInfoAddr = reinterpret_cast<uintptr_t>(ext_info_addr_dev_->GetData());
   }

+  auto task_defs = model.GetTaskDefs(node_item_->node);
+  GE_CHECK_NOTNULL(task_defs);
+  if (unknown_type_ == DEPEND_COMPUTE) {
+    GE_CHK_STATUS_RET_NOLOG(SetMemCopyTask((*task_defs)[1]));
+  }
   GELOGD("Node[%s] init end.", node_name.c_str());
   return SUCCESS;
 }
@@ -784,14 +926,30 @@ Status AicpuNodeTask::UpdateIoAddr(TaskContext &context) {
     GELOGD("Node[%s] input[%d] = %p, size = %zu", node_name_.c_str(), i, inputData->GetData(), inputData->GetSize());
     io_addrs.emplace_back(reinterpret_cast<uintptr_t>(inputData->GetData()));
   }
-
-  GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs());
-  for (auto j = 0; j < node_item_->num_outputs; ++j) {
-    auto outputData = context.GetOutput(j);
-    GE_CHECK_NOTNULL(outputData);
-    GELOGD("Node[%s] output[%d] addr = %p, size = %zu", node_name_.c_str(), j,
-           outputData->GetData(), outputData->GetSize());
-    io_addrs.emplace_back(reinterpret_cast<uintptr_t>(outputData->GetData()));
-  }
+  // known shape or not depend compute
+  if (!node_item_->is_dynamic || unknown_type_ != DEPEND_COMPUTE) {
+    // unknown type 4 do this in call back.
+    GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs());
+    for (auto j = 0; j < node_item_->num_outputs; ++j) {
+      auto outputData = context.GetOutput(j);
+      GE_CHECK_NOTNULL(outputData);
+      GELOGD("Node[%s] output[%d] addr = %p, size = %zu",
+             node_name_.c_str(), j, outputData->GetData(), outputData->GetSize());
+      io_addrs.emplace_back(reinterpret_cast<uintptr_t>(outputData->GetData()));
+    }
+  } else {
+    // unknown type 4 use result summary update ioaddr.
+    GELOGD("Node[%s] is depend compute node, use result summary as out addr.", node_name_.c_str());
+    GE_CHK_BOOL_RET_STATUS(output_summary_.size() == static_cast<std::size_t>(node_item_->num_outputs),
+                           INTERNAL_ERROR,
+                           "[Check][Size]Node[%s] has %d output but %zu output summary not equal.",
+                           node_name_.c_str(), node_item_->num_outputs, output_summary_.size());
+
+    for (auto j = 0; j < node_item_->num_outputs; ++j) {
+      void *summary_addr = output_summary_[j]->GetData();
+      io_addrs.emplace_back(reinterpret_cast<uintptr_t>(summary_addr));
+    }
+  }

   auto io_addr = args_.get() + sizeof(aicpu::AicpuParamHead);
@@ -829,23 +987,6 @@ Status AicpuNodeTask::LaunchTask(TaskContext &context) {
   return SUCCESS;
 }

-Status AicpuNodeTask::TaskCallback(TaskContext &context) {
-  GELOGD("Node[%s] task callback start, is_dynamic = %s, unknown_type=%d.",
-         node_name_.c_str(), node_item_->is_dynamic ? "true" : "false", unknown_type_);
-  Status callback_ret = SUCCESS;
-
-  // check need update shape, call update shape.
-  if (node_item_->is_dynamic && unknown_type_ == DEPEND_SHAPE_RANGE) {
-    // check result
-    callback_ret = UpdateOutputShapeFromExtInfo(context);
-  } else {
-    GELOGD("Node[%s] unknown shape type is %d no need update output shape.",
-           node_name_.c_str(), unknown_type_);
-  }
-  GELOGD("Node[%s] task callback end.", node_name_.c_str());
-  return callback_ret;
-}
-
 Status AiCpuNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const {
   // malloc HBM memory at Init, here just update them
   RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCpuNodeExecutorPrepareTask] Start");
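Taken together, the hunks above perform a pull-up refactoring: the DEPEND_COMPUTE machinery (read result summaries, prepare copy inputs, launch the copy kernel, update output shapes) and the formerly duplicated TaskCallback move into AicpuNodeTaskBase, and each subclass keeps only the one step whose runtime launch call differs, CopyDataToHbm. Below is a minimal, dependency-free sketch of that shape, not the real GE code: names ending in "Sketch" are invented, and prints stand in for the rtKernelLaunchEx / rtCpuKernelLaunchWithFlag / rtStreamSynchronize calls.

#include <iostream>
#include <vector>

using Status = int;              // stand-in for ge::Status
constexpr Status kSuccess = 0;

class AicpuTaskBaseSketch {
 public:
  virtual ~AicpuTaskBaseSketch() = default;

  // Shared driver, analogous to AicpuNodeTaskBase::UpdateShapeAndDataByResultSummary:
  // every step is common except the copy-kernel launch.
  Status UpdateShapeAndDataByResultSummary() {
    std::vector<int> out_shape_hbm;                  // stands in for TensorBuffer handles
    ReadResultSummaryAndPrepareMemory(out_shape_hbm);
    if (CopyDataToHbm(out_shape_hbm) != kSuccess) {  // the only virtual step
      return 1;
    }
    return UpdateShapeByHbmBuffer(out_shape_hbm);
  }

 protected:
  void ReadResultSummaryAndPrepareMemory(std::vector<int> &out_shape_hbm) {
    out_shape_hbm.assign(2, 0);                      // pretend the node has two outputs
    std::cout << "base: summaries read, copy buffers prepared\n";
  }
  Status UpdateShapeByHbmBuffer(const std::vector<int> &) {
    std::cout << "base: output shapes updated from HBM shape buffers\n";
    return kSuccess;
  }
  virtual Status CopyDataToHbm(const std::vector<int> &out_shape_hbm) = 0;
};

class TfTaskSketch : public AicpuTaskBaseSketch {
 protected:
  Status CopyDataToHbm(const std::vector<int> &) override {
    std::cout << "tf: launch packaged copy kernel (rtKernelLaunchEx in GE)\n";
    return kSuccess;
  }
};

class CcTaskSketch : public AicpuTaskBaseSketch {
 protected:
  Status CopyDataToHbm(const std::vector<int> &) override {
    std::cout << "cc: launch copy kernel by so/kernel name (rtCpuKernelLaunchWithFlag in GE)\n";
    return kSuccess;
  }
};

int main() {
  TfTaskSketch tf;
  CcTaskSketch cc;
  return tf.UpdateShapeAndDataByResultSummary() + cc.UpdateShapeAndDataByResultSummary();
}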


ge/hybrid/node_executor/aicpu/aicpu_node_executor.h  (+52 -36)

@@ -54,13 +54,35 @@ class AicpuNodeTaskBase : public NodeTask {
   Status UpdateShapeToOutputDesc(TaskContext &task_context, const GeShape &shape_new, int32_t output_index);

   virtual Status LaunchTask(TaskContext &context) = 0;
+  virtual Status UpdateShapeAndDataByResultSummary(TaskContext &context);

-  virtual Status TaskCallback(TaskContext &context) = 0;
+  virtual Status InitForDependComputeTask() = 0;
+
+  Status TaskCallback(TaskContext &context);

   virtual Status UpdateIoAddr(TaskContext &context) = 0;

   static Status AllocTensorBuffer(size_t size, std::unique_ptr<TensorBuffer> &tensor_buffer);

+  virtual Status CopyDataToHbm(TaskContext &context,
+                               const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) = 0;
+
+  ///
+  /// read result summary and prepare copy task memory.
+  /// @param context task context
+  /// @param out_shape_hbm if scalar, TensorBuffer->data is null, size=0
+  /// @return SUCCESS:success other:failed
+  ///
+  Status ReadResultSummaryAndPrepareMemory(TaskContext &context,
+                                           std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
+
+  Status UpdateShapeByHbmBuffer(TaskContext &context,
+                                const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
+
+  Status PrepareCopyInputs(const TaskContext &context,
+                           const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
+
 protected:
   const NodeItem *node_item_;
   // just reference.
@@ -78,6 +100,17 @@ class AicpuNodeTaskBase : public NodeTask {

   // ext info addr, device mem
   std::unique_ptr<TensorBuffer> ext_info_addr_dev_;
+
+  std::vector<std::unique_ptr<TensorBuffer>> output_summary_;
+  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
+
+  std::unique_ptr<TensorBuffer> copy_ioaddr_dev_;
+
+  std::unique_ptr<TensorBuffer> copy_input_release_flag_dev_;
+  std::unique_ptr<TensorBuffer> copy_input_data_size_dev_;
+  std::unique_ptr<TensorBuffer> copy_input_src_dev_;
+  std::unique_ptr<TensorBuffer> copy_input_dst_dev_;
+  bool need_sync_ = false;
 };

 class AicpuTfNodeTask : public AicpuNodeTaskBase {
@@ -93,34 +126,15 @@ class AicpuTfNodeTask : public AicpuNodeTaskBase {

   Status LaunchTask(TaskContext &context) override;

-  Status TaskCallback(TaskContext &context) override;
-
   Status UpdateIoAddr(TaskContext &context) override;
+  Status CopyDataToHbm(TaskContext &context,
+                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) override;

+  Status InitForDependComputeTask() override;
 private:
   Status SetMemCopyTask(const domi::TaskDef &task_def);

-  Status InitForDependComputeTask();
-
-  Status UpdateShapeAndDataByResultSummary(TaskContext &context);
-
-  ///
-  /// read result summary and prepare copy task memory.
-  /// @param context task context
-  /// @param out_shape_hbm if scalar, TensorBuffer->data is null, size=0
-  /// @return SUCCESS:success other:failed
-  ///
-  Status ReadResultSummaryAndPrepareMemory(TaskContext &context,
-                                           std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
-  Status CopyDataToHbm(TaskContext &context,
-                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
-
-  Status UpdateShapeByHbmBuffer(TaskContext &context,
-                                const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
-
-  Status PrepareCopyInputs(const TaskContext &context,
-                           const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
-
   static Status EnsureSessionCreated(uint64_t session_id);
   static uint64_t GetStepIdAddr(const HybridModel &model);
 private:
@@ -135,17 +149,6 @@ class AicpuTfNodeTask : public AicpuNodeTaskBase {
   // just used for depend DEPEND_COMPUTE op
   std::unique_ptr<TensorBuffer> copy_task_args_buf_;

-  std::vector<std::unique_ptr<TensorBuffer>> output_summary_;
-  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
-
-  std::unique_ptr<TensorBuffer> copy_ioaddr_dev_;
-
-  std::unique_ptr<TensorBuffer> copy_input_release_flag_dev_;
-  std::unique_ptr<TensorBuffer> copy_input_data_size_dev_;
-  std::unique_ptr<TensorBuffer> copy_input_src_dev_;
-  std::unique_ptr<TensorBuffer> copy_input_dst_dev_;
-  bool need_sync_ = false;
-
   std::unique_ptr<TensorBuffer> copy_workspace_buf_;
 };


@@ -162,16 +165,29 @@ class AicpuNodeTask : public AicpuNodeTaskBase {

   Status LaunchTask(TaskContext &context) override;

-  Status TaskCallback(TaskContext &context) override;
+  Status CopyDataToHbm(TaskContext &context,
+                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) override;

   Status UpdateIoAddr(TaskContext &context) override;

+  Status InitForDependComputeTask() override;
+private:
+  Status SetMemCopyTask(const domi::TaskDef &task_def);
+
 protected:
   // host mem
   std::unique_ptr<uint8_t[]> args_;

+  // host memcpy mem
+  std::unique_ptr<uint8_t[]> memcpy_args_;
   // args size
   uint32_t args_size_ = 0;

+  std::string memcpy_so_name_;
+
+  std::string memcpy_kernel_name_;
+  // args size
+  uint32_t memcpy_args_size_ = 0;
 };

 class AiCpuNodeExecutor : public NodeExecutor {
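For orientation, the protocol behind the members that just moved into AicpuNodeTaskBase: a DEPEND_COMPUTE kernel cannot know its output shapes up front, so it writes each output's payload to scratch device memory and fills one aicpu::FWKAdapter::ResultSummary per output; the host then sizes the real outputs and feeds a follow-up MemCopy kernel four parallel arrays (release flags, sizes, sources, destinations). The sketch below simulates that gather step on the host, assuming the usual four ResultSummary fields seen in this diff; plain memcpy stands in for the device copy kernel.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Assumed layout of aicpu::FWKAdapter::ResultSummary (hedged; these are the
// fields the surrounding diff reads).
struct ResultSummary {
  uint64_t shape_data_ptr;   // device address of the output's dims
  uint64_t shape_data_size;  // size of the dims buffer in bytes
  uint64_t raw_data_ptr;     // device address of the output payload
  uint64_t raw_data_size;    // payload size in bytes
};

int main() {
  // Pretend the kernel produced one output with payload {1,2,3,4} in scratch.
  std::vector<uint8_t> scratch = {1, 2, 3, 4};
  ResultSummary summary{0, 0, reinterpret_cast<uint64_t>(scratch.data()), scratch.size()};

  // Host side: the four parallel "copy inputs" this PR allocates
  // (copy_input_release_flag_dev_ and friends), one slot per output.
  std::vector<uint64_t> release_flag{0};                  // scratch not released here
  std::vector<uint64_t> data_size{summary.raw_data_size};
  std::vector<uint64_t> src{summary.raw_data_ptr};
  std::vector<uint8_t> output(summary.raw_data_size);     // freshly sized real output
  std::vector<uint64_t> dst{reinterpret_cast<uint64_t>(output.data())};

  // The MemCopy kernel gathers src[i] -> dst[i]; memcpy stands in for it.
  for (size_t i = 0; i < src.size(); ++i) {
    std::memcpy(reinterpret_cast<void *>(dst[i]),
                reinterpret_cast<const void *>(src[i]),
                static_cast<size_t>(data_size[i]));
  }
  std::cout << "copied " << output.size() << " bytes, first byte = "
            << int(output[0]) << "\n";
  return 0;
}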


ge/single_op/single_op_model.cc  (+53 -21)

@@ -536,28 +536,60 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource,
   auto compute_graph = GraphUtils::GetComputeGraph(ge_model->GetGraph());
   GE_CHECK_NOTNULL(compute_graph);
   single_op.compute_graph_ = compute_graph;
-  if (tbe_tasks_.size() > 0) {
-    const auto &task_def = tbe_tasks_[0];
-    GELOGD("Building TBE task.");
-    TbeOpTask *tbe_task = nullptr;
-    GE_CHK_STATUS_RET_NOLOG(BuildKernelTask(task_def, &tbe_task));
-    tbe_task->SetModelArgs(model_name_, model_id_);
-    if (tbe_task->tiling_buffer_ != nullptr) {
-      GELOGD("tiling buffer is not nullptr.");
-      tbe_task->stream_resource_ = stream_resource;
-    }
-    single_op.op_task_.reset(tbe_task);
-  } else if (aicpu_tasks_.size() > 0) {
-    const auto &task_def = aicpu_tasks_[0];
+  auto tasks = ge_model->GetModelTaskDefPtr()->task();
+  for (int i = 0; i < tasks.size(); ++i) {
+    const TaskDef &task_def = tasks[i];
+    GELOGI("[%s] Task[%d], type = [%u], DebugString = [%s]", model_name_.c_str(), i, task_def.type(),
+           task_def.DebugString().c_str());
     auto task_type = static_cast<rtModelTaskType_t>(task_def.type());
-    if (task_type == RT_MODEL_TASK_KERNEL) {
-      GELOGD("Building AICPU_CC task");
-      OpTask *task = nullptr;
-      uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++;
-      GELOGI("Build dynamic singleOp CCTask, kernel_id = %lu", dynamic_singleop_kernel_id);
-      GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task, dynamic_singleop_kernel_id));
-      task->SetModelArgs(model_name_, model_id_);
-      single_op.op_task_.reset(task);
+    if (task_type == RT_MODEL_TASK_KERNEL || task_type == RT_MODEL_TASK_ALL_KERNEL) {
+      if (single_op.op_task_ != nullptr) {
+        GELOGE(ACL_ERROR_GE_OP_TASK_TYPE_INVALID, "[Check][TaskType]Do not support dynamic op with multiple tasks.");
+        REPORT_INNER_ERROR("E19999",
+                           "BuildTaskListForDynamicOp fail for Do not support dynamic op with multiple tasks.");
+        return ACL_ERROR_GE_OP_TASK_TYPE_INVALID;
+      }
+      const auto &context = task_type == RT_MODEL_TASK_KERNEL ? task_def.kernel().context() :
+                                                                task_def.kernel_with_handle().context();
+      auto kernel_type = static_cast<ccKernelType>(context.kernel_type());
+      if (kernel_type == ccKernelType::TE) {
+        GELOGD("Building TBE task.");
+        TbeOpTask *tbe_task = nullptr;
+        GE_CHK_STATUS_RET_NOLOG(BuildKernelTask(task_def, &tbe_task));
+        tbe_task->SetModelArgs(model_name_, model_id_);
+        if (tbe_task->tiling_buffer_ != nullptr) {
+          GELOGD("tiling buffer is not nullptr.");
+          tbe_task->stream_resource_ = stream_resource;
+        }
+        single_op.op_task_.reset(tbe_task);
+      } else if (kernel_type == ccKernelType::AI_CPU || kernel_type == ccKernelType::CUST_AI_CPU) {
+        GELOGD("Building AICPU_CC task");
+        AiCpuCCTask *aicpu_kernel_task = nullptr;
+        uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++;
+        GELOGI("Build dynamic singleOp CCTask, kernel_id = %lu", dynamic_singleop_kernel_id);
+        GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &aicpu_kernel_task, dynamic_singleop_kernel_id));
+        if (aicpu_kernel_task->GetUnknownType() == DEPEND_COMPUTE) {
+          if (i >= tasks.size() - 1) {
+            GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Task]The copy task of the DEPEND_COMPUTE operator was not found.");
+            REPORT_INNER_ERROR("E19999", "The copy task of the DEPEND_COMPUTE operator was not found.");
+            return ACL_ERROR_GE_PARAM_INVALID;
+          }
+          ++i;
+          const TaskDef &copy_task_def = tasks[i];
+          GE_CHK_STATUS_RET_NOLOG(aicpu_kernel_task->SetMemCopyTask(copy_task_def.kernel()));
+        }
+        aicpu_kernel_task->SetModelArgs(model_name_, model_id_);
+        single_op.op_task_.reset(aicpu_kernel_task);
+      } else {
+        GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID,
+               "[Check][Param:TaskDef]Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u",
+               context.kernel_type());
+        REPORT_INNER_ERROR("E19999",
+                           "BuildTaskListForDynamicOp fail for got:%u not supported, "
+                           "Only TBE, AI_CPU, CUST_AI_CPU kernel are supported.",
+                           context.kernel_type());
+        return ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID;
+      }
     } else if (task_type == RT_MODEL_TASK_KERNEL_EX) {
       GELOGD("Building AICPU_TF task");
       AiCpuTask *aicpu_task = nullptr;
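The loader change above encodes a convention worth calling out: for a DEPEND_COMPUTE single op, the model's task list carries the kernel's MemCopy companion as the very next task def, and the loop consumes it with ++i. A stripped-down sketch of that pairing logic, using hypothetical Task/Kind types rather than domi::TaskDef:

#include <cstdio>
#include <vector>

// Hypothetical stand-ins for domi::TaskDef and the loader's checks.
enum class Kind { kKernel, kCopy, kOther };
struct Task { Kind kind; bool depend_compute; };

// Returns false on the malformed case the loader now reports:
// a DEPEND_COMPUTE kernel with no trailing copy task.
bool PairTasks(const std::vector<Task> &tasks) {
  for (size_t i = 0; i < tasks.size(); ++i) {
    if (tasks[i].kind != Kind::kKernel) continue;
    if (tasks[i].depend_compute) {
      if (i + 1 >= tasks.size() || tasks[i + 1].kind != Kind::kCopy) {
        std::puts("error: copy task for DEPEND_COMPUTE kernel not found");
        return false;
      }
      ++i;  // consume the companion copy task, as the loader's ++i does
      std::puts("kernel paired with its copy task");
    } else {
      std::puts("plain kernel, no copy task needed");
    }
  }
  return true;
}

int main() {
  std::vector<Task> good = {{Kind::kKernel, true}, {Kind::kCopy, false}};
  std::vector<Task> bad  = {{Kind::kKernel, true}};
  return PairTasks(good) && !PairTasks(bad) ? 0 : 1;
}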


ge/single_op/single_op_model.h  (+1 -1)

@@ -70,7 +70,7 @@ class SingleOpModel {
   Status BuildTaskListForDynamicOp(StreamResource *stream_resource, DynamicSingleOp &dynamic_single_op);
   Status BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask **task);
   Status BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, uint64_t kernel_id);
-  Status BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTask **task, uint64_t kernel_id);
+  Status BuildCpuKernelTask(const domi::KernelDef &kernel_def, AiCpuCCTask **task, uint64_t kernel_id);

   static void ParseOpModelParams(ModelHelper &model_helper, SingleOpModelParam &param);
   void ParseArgTable(OpTask *task, SingleOp &op);


ge/single_op/task/aicpu_kernel_task_builder.cc  (+2 -5)

@@ -78,7 +78,7 @@ Status AiCpuCCTaskBuilder::BuildTask(AiCpuCCTask &task, uint64_t kernel_id, cons
     task.is_custom_ = true;
     task.dump_flag_ |= RT_KERNEL_CUSTOM_AICPU;
     bool loaded = false;
-    GE_CHK_STATUS_RET(ModelManager::GetInstance()->LoadCustAicpuSo(op_desc_, so_name, loaded),
+    GE_CHK_STATUS_RET(ModelManager::GetInstance()->LoadCustAicpuSo(op_desc_, so_name, loaded),
                       "[Load][CustAicpuSo] failed.");
     if (!loaded) {
       GE_CHK_STATUS_RET(ModelManager::GetInstance()->LaunchCustAicpuSo(), "[Launch][CustAicpuSo] failed.");
@@ -102,11 +102,8 @@ Status AiCpuCCTaskBuilder::BuildTask(AiCpuCCTask &task, uint64_t kernel_id, cons
     return ret;
   }
   GE_CHK_STATUS_RET(task.SetInputConst(), "[Set][InputConst] failed.");
+  GE_CHK_STATUS_RET(task.InitForSummaryAndCopy(), "[Init][SummaryAndCopy] failed.");

-  if (task.GetUnknownType() == DEPEND_COMPUTE) {
-    GELOGE(FAILED, "[Get][UnknownType] is depend compute, it's not supported now.");
-    return FAILED;
-  }
   auto aicpu_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(task.args_.get());
   if (task.ext_info_addr_dev_ != nullptr) {
     aicpu_param_head->extInfoLength = kernel_ext_info.size();


ge/single_op/task/op_task.cc  (+122 -15)

@@ -467,6 +467,18 @@ AiCpuBaseTask::~AiCpuBaseTask() {
   if (ext_info_addr_dev_ != nullptr) {
     (void)rtFree(ext_info_addr_dev_);
   }
+
+  FreeHbm(copy_ioaddr_dev_);
+  FreeHbm(copy_input_release_flag_dev_);
+  FreeHbm(copy_input_data_size_dev_);
+  FreeHbm(copy_input_src_dev_);
+  FreeHbm(copy_input_dst_dev_);
+  for (auto summary : output_summary_) {
+    FreeHbm(summary);
+  }
+  for (auto out_shape : out_shape_hbm_) {
+    FreeHbm(out_shape);
+  }
 }

 Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint64_t kernel_id) {
@@ -670,18 +682,7 @@ AiCpuTask::~AiCpuTask() {
   FreeHbm(io_addr_);
   FreeHbm(workspace_addr_);
   FreeHbm(copy_workspace_buf_);
-  FreeHbm(copy_ioaddr_dev_);
-  FreeHbm(copy_input_release_flag_dev_);
-  FreeHbm(copy_input_data_size_dev_);
-  FreeHbm(copy_input_src_dev_);
-  FreeHbm(copy_input_dst_dev_);
   FreeHbm(copy_task_args_buf_);
-  for (auto summary : output_summary_) {
-    FreeHbm(summary);
-  }
-  for (auto out_shape : out_shape_hbm_) {
-    FreeHbm(out_shape);
-  }
 }

 Status AiCpuTask::LaunchKernel(rtStream_t stream) {
@@ -711,7 +712,7 @@ Status AiCpuTask::LaunchKernel(rtStream_t stream) {
   return SUCCESS;
 }

-Status AiCpuTask::PrepareCopyInputs(vector<DataBuffer> &outputs) {
+Status AiCpuBaseTask::PrepareCopyInputs(vector<DataBuffer> &outputs) {
   std::vector<uint64_t> copy_input_release_flag;
   std::vector<uint64_t> copy_input_data_size;
   std::vector<uint64_t> copy_input_src;
@@ -752,7 +753,7 @@ Status AiCpuTask::PrepareCopyInputs(vector<DataBuffer> &outputs) {
   return SUCCESS;
 }

-Status AiCpuTask::ReadResultSummaryAndPrepareMemory() {
+Status AiCpuBaseTask::ReadResultSummaryAndPrepareMemory() {
   for (size_t i = 0; i < num_outputs_; ++i) {
     auto &result_summary = output_summary_host_[i];

@@ -779,7 +780,20 @@ Status AiCpuTask::CopyDataToHbm(vector<DataBuffer> &outputs,
   return SUCCESS;
 }

-Status AiCpuTask::UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc) {
+Status AiCpuCCTask::CopyDataToHbm(vector<DataBuffer> &outputs,
+                                  rtStream_t stream) {
+  GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(outputs));
+  auto ret = rtCpuKernelLaunchWithFlag(static_cast<const void *>(memcpy_so_name_.data()),
+                                       static_cast<const void *>(memcpy_kernel_name_.data()),
+                                       block_dim_, memcpy_args_.get(), static_cast<uint32_t>(memcpy_args_size_),
+                                       nullptr, stream, dump_flag_);
+  GE_CHK_RT_RET(ret);
+  GE_CHK_RT_RET(rtStreamSynchronize(stream));
+  return SUCCESS;
+}
+
+Status AiCpuBaseTask::UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc) {
   for (size_t i = 0; i < num_outputs_; ++i) {
     const auto &result_summary = output_summary_host_[i];
     std::vector<int64_t> shape_dims;
@@ -808,7 +822,7 @@ Status AiCpuTask::UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc) {
   return SUCCESS;
 }

-Status AiCpuTask::UpdateShapeAndDataByResultSummary(vector<GeTensorDesc> &output_desc,
+Status AiCpuBaseTask::UpdateShapeAndDataByResultSummary(vector<GeTensorDesc> &output_desc,
                                                     vector<DataBuffer> &outputs,
                                                     rtStream_t stream) {
   if (num_outputs_ == 0) {
@@ -998,8 +1012,101 @@ Status AiCpuCCTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
   if (unknown_type_ == DEPEND_SHAPE_RANGE) {
     GE_CHK_RT_RET(rtStreamSynchronize(stream));
     GE_CHK_STATUS_RET_NOLOG(UpdateOutputShape(output_desc));
+  } else if (unknown_type_ == DEPEND_COMPUTE) {
+    GE_CHK_RT_RET(rtStreamSynchronize(stream));
+    GE_CHK_STATUS_RET_NOLOG(UpdateShapeAndDataByResultSummary(output_desc, output_buffers, stream));
   }

   return SUCCESS;
 }

+Status AiCpuCCTask::InitForSummaryAndCopy() {
+  if (unknown_type_ != DEPEND_COMPUTE || num_outputs_ == 0) {
+    GELOGI("Unknown_type is %d, output num is %zu.", unknown_type_, num_outputs_);
+    return SUCCESS;
+  }
+
+  output_summary_.resize(num_outputs_);
+  constexpr auto result_summary_size = sizeof(aicpu::FWKAdapter::ResultSummary);
+  for (size_t i = 0; i < num_outputs_; ++i) {
+    GE_CHK_RT_RET(rtMalloc(&output_summary_[i], result_summary_size, RT_MEMORY_HBM));
+  }
+  output_summary_host_.resize(num_outputs_);
+
+  const size_t copy_input_buf_len = num_outputs_ * kCopyNum * sizeof(uint64_t);
+
+  GE_CHK_RT_RET(rtMalloc(&copy_input_release_flag_dev_, copy_input_buf_len, RT_MEMORY_HBM));
+  GE_CHK_RT_RET(rtMalloc(&copy_input_data_size_dev_, copy_input_buf_len, RT_MEMORY_HBM));
+  GE_CHK_RT_RET(rtMalloc(&copy_input_src_dev_, copy_input_buf_len, RT_MEMORY_HBM));
+  GE_CHK_RT_RET(rtMalloc(&copy_input_dst_dev_, copy_input_buf_len, RT_MEMORY_HBM));
+
+  GE_CHK_RT_RET(rtMalloc(&copy_task_args_buf_, sizeof(STR_FWK_OP_KERNEL), RT_MEMORY_HBM));
+
+  std::vector<uint64_t> copy_io_addr;
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_release_flag_dev_));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_data_size_dev_));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_src_dev_));
+  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_dst_dev_));
+
+  const auto copy_io_addr_size = sizeof(uint64_t) * copy_io_addr.size();
+
+  GE_CHK_RT_RET(rtMalloc(&copy_ioaddr_dev_, copy_io_addr_size, RT_MEMORY_HBM));
+
+  GE_CHK_RT_RET(rtMemcpy(copy_ioaddr_dev_, copy_io_addr_size,
+                         copy_io_addr.data(), copy_io_addr_size, RT_MEMCPY_HOST_TO_DEVICE));
+  return SUCCESS;
+}
+
+Status AiCpuCCTask::SetMemCopyTask(const domi::KernelDef &kernel_def) {
+  auto &memcpy_args = kernel_def.args();
+  memcpy_args_size_ = kernel_def.args_size();
+  memcpy_so_name_ = kernel_def.so_name();
+  memcpy_kernel_name_ = kernel_def.kernel_name();
+  GE_IF_BOOL_EXEC(memcpy_args.size() != memcpy_args_size_,
+                  REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu, but args_size=%zu not equal.",
+                                     memcpy_args.size(), memcpy_args_size_);
+                  GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu, but args_size=%zu not equal.",
+                         memcpy_args.size(), memcpy_args_size_);
+                  return FAILED;);
+  GE_IF_BOOL_EXEC(memcpy_args_size_ < sizeof(aicpu::AicpuParamHead),
+                  REPORT_INNER_ERROR("E19999",
+                                     "Task def args_size=%zu is less than aicpu param head len=%zu.",
+                                     memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+                  GELOGE(FAILED,
+                         "[Check][Size] Task def args_size=%zu is less than aicpu param head len=%zu.",
+                         memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+                  return FAILED;);
+
+  memcpy_args_.reset(new(std::nothrow) uint8_t[memcpy_args_size_]());
+  GE_IF_BOOL_EXEC(memcpy_args_ == nullptr,
+                  REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%zu].",
+                                     memcpy_args_size_);
+                  GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%zu].",
+                         memcpy_args_size_);
+                  return FAILED;);
+
+  errno_t sec_ret = memcpy_s(memcpy_args_.get(), memcpy_args_size_, memcpy_args.c_str(), memcpy_args.size());
+  GE_IF_BOOL_EXEC(sec_ret != EOK,
+                  REPORT_INNER_ERROR("E19999",
+                                     "memcpy_s args_ failed for Node[MemCopy], ret: %d", sec_ret);
+                  GELOGE(INTERNAL_ERROR,
+                         "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
+                  return sec_ret;);
+  auto memcpy_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(memcpy_args_.get());
+  uint32_t memcpy_io_num = memcpy_param_head->ioAddrNum;
+  auto memcpy_io_addr = memcpy_args_.get() + sizeof(aicpu::AicpuParamHead);
+  // if has input and output, need copy to ioaddr
+  int cpy_ret = memcpy_s(memcpy_io_addr, memcpy_args_size_ - sizeof(aicpu::AicpuParamHead),
+                         &copy_ioaddr_dev_, sizeof(uint64_t) * memcpy_io_num);
+  GE_IF_BOOL_EXEC(cpy_ret != 0,
+                  REPORT_INNER_ERROR("E19999", "Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+                                     "ret=%d, args_size=%zu, io nums=%u.",
+                                     cpy_ret, memcpy_args_size_, memcpy_io_num);
+                  GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+                         "ret=%d, args_size=%zu, io nums=%u.",
+                         cpy_ret, memcpy_args_size_, memcpy_io_num);
+                  return INTERNAL_ERROR;);
+  GELOGD("Set memcpy task for node[MemCopy] successfully.");
+  return SUCCESS;
+}
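Both SetMemCopyTask variants in this PR patch the same blob layout: an aicpu::AicpuParamHead followed immediately by ioAddrNum 64-bit I/O addresses, which is why each first checks args_size against sizeof(AicpuParamHead) and bounds the second memcpy_s by whatever remains after the head. A self-contained illustration of that layout and bounds check follows; the two-field head below is a simplification of the real struct, which carries additional fields such as extInfoLength/extInfoAddr.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Simplified stand-in for aicpu::AicpuParamHead (assumed shape, not the
// authoritative definition).
struct ParamHeadSim {
  uint32_t length;     // total args size in bytes
  uint32_t ioAddrNum;  // number of uint64_t io addresses after the head
};

int main() {
  const uint32_t io_num = 4;  // the copy kernel's 4 inputs
  const uint32_t args_size = sizeof(ParamHeadSim) + io_num * sizeof(uint64_t);
  std::vector<uint8_t> args(args_size, 0);

  auto *head = reinterpret_cast<ParamHeadSim *>(args.data());
  head->length = args_size;
  head->ioAddrNum = io_num;

  // Addresses to patch in; in GE these are the device copy-input buffers.
  uint64_t io_addrs[io_num] = {0x1000, 0x2000, 0x3000, 0x4000};

  // Same bounds logic as the diff: never write more than fits after the head.
  uint8_t *io_region = args.data() + sizeof(ParamHeadSim);
  size_t io_capacity = args_size - sizeof(ParamHeadSim);
  if (sizeof(io_addrs) > io_capacity) {
    std::cerr << "args blob too small for " << io_num << " io addresses\n";
    return 1;
  }
  std::memcpy(io_region, io_addrs, sizeof(io_addrs));

  std::cout << "patched " << head->ioAddrNum << " io addresses into a "
            << head->length << "-byte args blob\n";
  return 0;
}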




ge/single_op/task/op_task.h  (+32 -22)

@@ -152,7 +152,15 @@ class AiCpuBaseTask : public OpTask {
                             rtStream_t stream);
   Status UpdateOutputShape(vector<GeTensorDesc> &output_desc);
   Status UpdateShapeToOutputDesc(const GeShape &shape_new, GeTensorDesc &output_desc);
+  Status UpdateShapeAndDataByResultSummary(vector<GeTensorDesc> &output_desc,
+                                           vector<DataBuffer> &outputs,
+                                           rtStream_t stream);
+  Status ReadResultSummaryAndPrepareMemory();
+
+  Status PrepareCopyInputs(vector<DataBuffer> &outputs);
+
+  Status UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc);
 protected:
   size_t num_inputs_ = 0;
   size_t num_outputs_ = 0;
@@ -160,6 +168,18 @@ class AiCpuBaseTask : public OpTask {
   std::unique_ptr<ge::hybrid::AicpuExtInfoHandler> aicpu_ext_handle_;
   void *ext_info_addr_dev_ = nullptr;
   vector<bool> input_is_const_;
+  std::vector<void *> output_summary_;
+  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
+
+  void *copy_ioaddr_dev_ = nullptr;
+
+  void *copy_input_release_flag_dev_ = nullptr;
+  void *copy_input_data_size_dev_ = nullptr;
+  void *copy_input_src_dev_ = nullptr;
+  void *copy_input_dst_dev_ = nullptr;
+
+  vector<void *> out_shape_hbm_;
 };

 class AiCpuTask : public AiCpuBaseTask {
@@ -180,16 +200,9 @@ class AiCpuTask : public AiCpuBaseTask {
 private:
   // for copy task.
   Status InitForSummaryAndCopy();
-  Status UpdateShapeAndDataByResultSummary(vector<GeTensorDesc> &output_desc,
-                                           vector<DataBuffer> &outputs,
-                                           rtStream_t stream);
-  Status ReadResultSummaryAndPrepareMemory();

   Status CopyDataToHbm(vector<DataBuffer> &outputs, rtStream_t stream);
-  Status PrepareCopyInputs(vector<DataBuffer> &outputs);
-
-  Status UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc);
-
-private:
   friend class AiCpuTaskBuilder;
   void *workspace_addr_ = nullptr;
   std::string task_info_;
@@ -207,18 +220,6 @@ class AiCpuTask : public AiCpuBaseTask {
   // for copy task
   void *copy_task_args_buf_ = nullptr;
   void *copy_workspace_buf_ = nullptr;

-  std::vector<void *> output_summary_;
-  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
-
-  void *copy_ioaddr_dev_ = nullptr;
-
-  void *copy_input_release_flag_dev_ = nullptr;
-  void *copy_input_data_size_dev_ = nullptr;
-  void *copy_input_src_dev_ = nullptr;
-  void *copy_input_dst_dev_ = nullptr;
-
-  vector<void *> out_shape_hbm_;
   uint64_t kernel_id_ = 0;
 };


@@ -237,14 +238,17 @@ class AiCpuCCTask : public AiCpuBaseTask {
   void SetkernelName(const std::string &kernel_Name);
   void SetIoAddr(uintptr_t *io_addr);
   size_t GetArgSize() const;
+  Status SetMemCopyTask(const domi::KernelDef &kernel_def);
   Status LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
                       const std::vector<DataBuffer> &input_buffers,
                       std::vector<GeTensorDesc> &output_desc,
                       std::vector<DataBuffer> &output_buffers,
                       rtStream_t stream) override;
+private:
+  Status InitForSummaryAndCopy();

+  Status CopyDataToHbm(vector<DataBuffer> &outputs, rtStream_t stream);
 private:
   friend class AiCpuCCTaskBuilder;
   std::string so_name_;
   std::string kernel_name_;
@@ -257,6 +261,12 @@ private:
   uint32_t dump_flag_ = RT_KERNEL_DEFAULT;
   std::string op_type_;
   uint64_t kernel_id_ = 0;
+  // host memcpy mem
+  std::unique_ptr<uint8_t[]> memcpy_args_;
+  std::string memcpy_so_name_;
+  std::string memcpy_kernel_name_;
+  // args size
+  size_t memcpy_args_size_ = 0;
 };

 class MemcpyAsyncTask : public OpTask {
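One consequence of hoisting output_summary_, the copy_input_*_dev_ pointers and out_shape_hbm_ into AiCpuBaseTask is that releasing them also becomes the base destructor's job (the FreeHbm calls moved out of ~AiCpuTask in the op_task.cc hunk above), while subclass destructors free only what they still own. A toy illustration of that ownership split, with a counter standing in for rtFree/FreeHbm:

#include <iostream>

// Toy stand-ins for device allocation and FreeHbm.
static int g_live_allocs = 0;
void *AllocSim() { ++g_live_allocs; return new char[8]; }
void FreeSim(void *p) {
  if (p != nullptr) { --g_live_allocs; delete[] static_cast<char *>(p); }
}

class BaseTaskSim {
 public:
  virtual ~BaseTaskSim() {
    // Shared members are freed once, here, regardless of the subclass --
    // mirroring the FreeHbm calls moved into ~AiCpuBaseTask by this PR.
    FreeSim(copy_ioaddr_dev_);
    FreeSim(copy_input_src_dev_);
  }
 protected:
  void *copy_ioaddr_dev_ = AllocSim();
  void *copy_input_src_dev_ = AllocSim();
};

class TfTaskSim : public BaseTaskSim {
 public:
  ~TfTaskSim() override { FreeSim(copy_workspace_buf_); }  // TF-only member
 private:
  void *copy_workspace_buf_ = AllocSim();
};

class CcTaskSim : public BaseTaskSim {};  // nothing extra to free

int main() {
  { TfTaskSim tf; CcTaskSim cc; }
  std::cout << "live allocations after teardown: " << g_live_allocs << "\n";
  return g_live_allocs == 0 ? 0 : 1;
}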

