
!750 Add SetIoAddrs for UpdateArgs.

From: @zhangxiaokun9
Reviewed-by: @wangxiaotian22, @xchu42
Signed-off-by: @ji_chen
tags/v1.2.0
mindspore-ci-bot committed 3 years ago
commit 0996fda674
16 changed files with 2122 additions and 180 deletions
1. ge/graph/load/new_model_manager/davinci_model.cc (+29, -0)
2. ge/graph/load/new_model_manager/davinci_model.h (+27, -3)
3. ge/graph/load/new_model_manager/task_info/hccl_task_info.cc (+31, -29)
4. ge/graph/load/new_model_manager/task_info/hccl_task_info.h (+2, -6)
5. ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.cc (+21, -23)
6. ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.h (+2, -1)
7. ge/graph/load/new_model_manager/task_info/kernel_task_info.cc (+75, -63)
8. ge/graph/load/new_model_manager/task_info/kernel_task_info.h (+3, -22)
9. ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.cc (+33, -29)
10. ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.h (+3, -2)
11. tests/ut/ge/CMakeLists.txt (+8, -2)
12. tests/ut/ge/graph/load/hccl_task_info_unittest.cc (+134, -0)
13. tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc (+144, -0)
14. tests/ut/ge/graph/load/kernel_task_info_unittest.cc (+1199, -0)
15. tests/ut/ge/graph/load/memcpy_addr_async_task_info_unittest.cc (+138, -0)
16. tests/ut/ge/graph/load/memcpy_async_task_info_unittest.cc (+273, -0)

ge/graph/load/new_model_manager/davinci_model.cc (+29, -0)

@@ -108,6 +108,7 @@ std::mutex DavinciModel::tvm_bin_mutex_;
DavinciModel::DavinciModel(int32_t priority, const std::shared_ptr<ModelListener> &listener)
: weights_mem_base_(nullptr),
var_mem_base_(nullptr),
fixed_mem_base_(0),
mem_base_(nullptr),
is_inner_mem_base_(false),
is_inner_weight_base_(false),
@@ -139,6 +140,7 @@ DavinciModel::DavinciModel(int32_t priority, const std::shared_ptr<ModelListener
is_l1_fusion_enable_(false),
is_first_execute_(true) {
op_list_.clear();
skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
}

DavinciModel::~DavinciModel() {
@@ -261,6 +263,7 @@ Status DavinciModel::Assign(const GeModelPtr &ge_model) {
/// @return: void
///
void DavinciModel::Shrink() {
skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
ge_model_.reset(); // delete object.
}


@@ -668,6 +671,7 @@ Status DavinciModel::Init(void *dev_ptr, size_t mem_size, void *weight_ptr, size
data_inputer_ = new (std::nothrow) DataInputer();
GE_CHK_BOOL_RET_STATUS(data_inputer_ != nullptr, MEMALLOC_FAILED, "data_inputer_ is nullptr.");
}
fixed_mem_base_ = reinterpret_cast<uintptr_t>(mem_base_);
GE_TIMESTAMP_END(InitModelMem, "GraphLoader::InitModelMem");

for (const ge::NodePtr &node : compute_graph->GetDirectNode()) {
@@ -2826,7 +2830,32 @@ Status DavinciModel::CreateKnownZeroCopyMap(const vector<void *> &inputs, const
return SUCCESS;
}


void DavinciModel::SetTotalIOAddrs(const vector<void *> &io_addrs) {
if (fixed_mem_base_ == reinterpret_cast<uintptr_t>(mem_base_)) {
total_io_addrs_.insert(total_io_addrs_.end(), io_addrs.begin(), io_addrs.end());
return;
}

for (size_t i = 0; i < io_addrs.size(); ++i) {
uintptr_t addr = reinterpret_cast<uintptr_t>(io_addrs[i]);
if ((fixed_mem_base_ <= addr) && (addr < fixed_mem_base_ + runtime_param_.mem_size)) {
total_io_addrs_.emplace_back(mem_base_ + (addr - fixed_mem_base_));
} else {
total_io_addrs_.emplace_back(io_addrs[i]);
}
}
}

Status DavinciModel::UpdateKnownZeroCopyAddr(vector<void *> &total_io_addrs) {
if (fixed_mem_base_ != reinterpret_cast<uintptr_t>(mem_base_)) {
for (size_t i = 0; i < total_io_addrs.size(); ++i) {
uintptr_t addr = reinterpret_cast<uintptr_t>(total_io_addrs[i]);
if ((fixed_mem_base_ <= addr) && (addr < fixed_mem_base_ + runtime_param_.mem_size)) {
total_io_addrs[i] = mem_base_ + (addr - fixed_mem_base_);
}
}
}

for (size_t i = 0; i < total_io_addrs.size(); ++i) {
auto it_in = knonw_input_data_info_.find(total_io_addrs[i]);
if (it_in != knonw_input_data_info_.end()) {
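The two additions above share one idea: the model records the memory base it was loaded with (fixed_mem_base_), and if the feature-map block has since been reallocated, any cached address inside the old range is shifted onto the current mem_base_ before use. A minimal, self-contained sketch of that rebase rule (simplified types, not code from this repository):

#include <cstdint>
#include <vector>

// Hypothetical stand-in for the DavinciModel fields used by SetTotalIOAddrs.
struct RebaseContext {
  uintptr_t fixed_mem_base;  // base address captured when the model was loaded
  uint8_t *mem_base;         // base address currently in use
  uint64_t mem_size;         // size of the model's feature-map memory block
};

// Append io_addrs to total, shifting addresses that fall inside the original
// [fixed_mem_base, fixed_mem_base + mem_size) range onto the current base.
void AppendRebased(const RebaseContext &ctx, const std::vector<void *> &io_addrs,
                   std::vector<void *> &total) {
  if (ctx.fixed_mem_base == reinterpret_cast<uintptr_t>(ctx.mem_base)) {
    total.insert(total.end(), io_addrs.begin(), io_addrs.end());  // nothing has moved
    return;
  }
  for (void *p : io_addrs) {
    const uintptr_t addr = reinterpret_cast<uintptr_t>(p);
    if (ctx.fixed_mem_base <= addr && addr < ctx.fixed_mem_base + ctx.mem_size) {
      total.emplace_back(ctx.mem_base + (addr - ctx.fixed_mem_base));
    } else {
      total.emplace_back(p);
    }
  }
}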


ge/graph/load/new_model_manager/davinci_model.h (+27, -3)

@@ -76,6 +76,25 @@ struct timeInfo {
int64_t dumpEndTime;
};


// For super kernel
struct SuperKernelTaskInfo {
uint32_t last_block_dim;
uint32_t last_args_size;
uint32_t last_task_id;
uint32_t last_stream_id;
void *last_stream;
void *last_sm_desc;
std::vector<void *> kernel_list;
std::vector<void *> arg_list;
std::vector<uint32_t> dump_flag_list;
std::vector<OpDescPtr> op_desc_list;
std::vector<uintptr_t> dump_args_list;
uint32_t last_dump_flag;
int64_t last_group_key;
uintptr_t last_dump_args;
OpDescPtr last_op;
};

struct TaskMemInfo {
int64_t input_size{0};
int64_t output_size{0};
@@ -261,6 +280,9 @@ class DavinciModel {
// get updated task info list
std::vector<TaskInfoPtr> GetTaskList() { return task_list_; }


// Modified from KernelTaskInfo.
SuperKernelTaskInfo &GetSuperKernelTaskInfo() { return skt_info_; }

///
/// @ingroup ge
/// @brief get model input and output format
@@ -481,9 +503,7 @@ class DavinciModel {
void *cur_args = static_cast<char *>(args_) + offset;
return cur_args;
}
void SetTotalIOAddrs(vector<void *> &io_addrs) {
total_io_addrs_.insert(total_io_addrs_.end(), io_addrs.begin(), io_addrs.end());
}
void SetTotalIOAddrs(const vector<void *> &io_addrs);
void SetHybridArgsSize(uint32_t args_size) { total_hybrid_args_size_ += args_size; }
uint32_t GetHybridArgsSize() {
return total_hybrid_args_size_;
@@ -533,6 +553,7 @@ class DavinciModel {
uint8_t *weights_mem_base_;
uint8_t *var_mem_base_;
// memory address of model
uintptr_t fixed_mem_base_; // Initial of mem_base_, keep forever.
uint8_t *mem_base_;
uint8_t *p2p_mem_base_;
bool is_inner_mem_base_;
@@ -996,6 +1017,9 @@ class DavinciModel {


std::multimap<uint32_t, uint32_t> op_id_map_;
std::vector<ProfileInfo> profile_list_;

// For super kernel.
SuperKernelTaskInfo skt_info_;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_DAVINCI_MODEL_H_
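These header changes move the super-kernel bookkeeping (SuperKernelTaskInfo) out of KernelTaskInfo, where it was a static member, into the owning DavinciModel, exposed through GetSuperKernelTaskInfo(). A minimal sketch of why that matters (simplified names, not repository code): each model now carries its own state, so two models in the same process no longer share or clobber one another's super-kernel lists.

#include <vector>

// Simplified stand-ins; the real types live in davinci_model.h / kernel_task_info.h.
struct SktState {
  std::vector<void *> kernel_list;
  std::vector<void *> arg_list;
};

class ModelSketch {
 public:
  SktState &GetSktState() { return skt_state_; }  // analogous to GetSuperKernelTaskInfo()
 private:
  SktState skt_state_;  // previously: a static member shared by every task in the process
};

class TaskSketch {
 public:
  explicit TaskSketch(ModelSketch &model) : model_(model) {}
  void SaveKernel(void *kernel, void *args) {
    SktState &skt = model_.GetSktState();  // per-model state, not process-wide
    skt.kernel_list.push_back(kernel);
    skt.arg_list.push_back(args);
  }
 private:
  ModelSketch &model_;
};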

ge/graph/load/new_model_manager/task_info/hccl_task_info.cc (+31, -29)

@@ -59,40 +59,40 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
GELOGI("HcclTaskInfo Init, op_index is: %u", op_index);

// Get HCCL op
op_desc_ = davinci_model->GetOpByIndex(op_index);
GE_CHECK_NOTNULL(op_desc_);
const auto op_desc = davinci_model_->GetOpByIndex(op_index);
GE_CHECK_NOTNULL(op_desc);


// Create the kernel hccl infos
CreateKernelHcclInfo(op_desc_);
CreateKernelHcclInfo(op_desc);


// Initialize the hccl_type of all kernel hccl info
HcomOmeUtil::GetHcclType(task_def, kernel_hccl_infos_);

// Only in Horovod scenario should get the inputName and GeShape
ret = HcomOmeUtil::GetHorovodInputs(op_desc_, kernel_hccl_infos_);
ret = HcomOmeUtil::GetHorovodInputs(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "davinci_model: GetHorovodInputs fail! domi error: %u", ret);
return ret;
}
Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc_, kernel_hccl_infos_);
Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
GELOGE(dmrt, "davinci_model: GetHcomDataType fail! domi error: %u", dmrt);
return dmrt;
}
dmrt = HcomOmeUtil::GetHcclCount(op_desc_, kernel_hccl_infos_);
dmrt = HcomOmeUtil::GetHcclCount(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
GELOGE(dmrt, "davinci_model: GetHcomCount fail! domi error: %u", dmrt);
return dmrt;
}
// Only HCOMBROADCAST and HVDCALLBACKBROADCAST need to get the rootId
dmrt = HcomOmeUtil::GetAllRootId(op_desc_, kernel_hccl_infos_);
dmrt = HcomOmeUtil::GetAllRootId(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
GELOGE(dmrt, "davinci_model: Get rootId fail! domi error: %u", dmrt);
return dmrt;
}

// GE's new process: hccl declares the number of streams required, creates a stream by GE, and sends it to hccl
ret = SetFollowStream(op_desc_, davinci_model);
ret = SetFollowStream(op_desc, davinci_model);
if (ret != SUCCESS) {
GELOGE(ret, "SetStream Fail.");
return ret;
@@ -100,21 +100,22 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m


if (davinci_model_->IsKnownNode()) {
args_ = davinci_model_->GetCurrentArgsAddr(args_offset_);
GELOGI("Known node %s args addr %p, offset %u.", op_desc_->GetName().c_str(), args_, args_offset_);
GELOGI("Known node %s args addr %p, offset %u.", op_desc->GetName().c_str(), args_, args_offset_);
}


ret = SetAddrs(op_desc_, kernel_hccl_infos_);
ret = SetAddrs(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "Setaddrs Fail.");
return ret;
}
// GE's new process: hccl declares the need for Workspace size, and GE allocates Workspace
ret = SetWorkspace(op_desc_, kernel_hccl_infos_);
ret = SetWorkspace(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
GELOGE(ret, "SetWorkspace Fail.");
return ret;
}

SetIoAddrs(op_desc);
GELOGI("HcclTaskInfo Init Success");
return SUCCESS;
}
@@ -229,20 +230,19 @@ Status HcclTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *
return SUCCESS;
}


Status HcclTaskInfo::UpdateArgs() {
GELOGI("HcclTaskInfo::UpdateArgs in.");
void HcclTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
input_data_addrs_ = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
output_data_addrs_ = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc_);

vector<void *> io_addrs;
io_addrs.insert(io_addrs.end(), input_data_addrs_.begin(), input_data_addrs_.end());
io_addrs.insert(io_addrs.end(), output_data_addrs_.begin(), output_data_addrs_.end());
io_addrs.insert(io_addrs.end(), workspace_data_addrs_.begin(), workspace_data_addrs_.end());

davinci_model_->SetTotalIOAddrs(io_addrs);
const auto input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
const auto output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
}


Status HcclTaskInfo::UpdateArgs() {
GELOGI("HcclTaskInfo::UpdateArgs in.");
davinci_model_->SetTotalIOAddrs(io_addrs_);
GELOGI("HcclTaskInfo::UpdateArgs success.");
return SUCCESS;
}
@@ -261,9 +261,11 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
HcclReduceOp op_type = HCCL_REDUCE_SUM;
GE_CHECK_NOTNULL(davinci_model_);
GELOGI("Calc opType[%s] input address before. Node name[%s]", op_desc->GetType().c_str(), op_desc->GetName().c_str());
vector<void *> input_data_addrs;
vector<void *> output_data_addrs;
if (!davinci_model_->IsKnownNode()) {
input_data_addrs_ = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
output_data_addrs_ = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
input_data_addrs = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
output_data_addrs = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
}
void *input_data_addr = nullptr;
void *output_data_addr = nullptr;
@@ -275,8 +277,8 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
output_data_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() + i);
GELOGI("Hccl task info known input addr %p, output addr %p.", input_data_addr, output_data_addr);
} else {
input_data_addr = input_data_addrs_.empty() ? nullptr : input_data_addrs_[i];
output_data_addr = output_data_addrs_.empty() ? nullptr : output_data_addrs_[i];
input_data_addr = input_data_addrs.empty() ? nullptr : input_data_addrs[i];
output_data_addr = output_data_addrs.empty() ? nullptr : output_data_addrs[i];
}
kernel_hccl_infos[i].inputDataAddr = input_data_addr;
if (hccl_type == HCOMALLGATHER || hccl_type == HCOMRECEIVE || hccl_type == HVDCALLBACKALLGATHER) {
@@ -366,8 +368,8 @@ Status HcclTaskInfo::SetWorkspace(const std::shared_ptr<OpDesc> &op_desc,
workspace_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() +
op_desc->GetOutputsSize());
} else {
workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
workspace_addr = workspace_data_addrs_.empty() ? nullptr : workspace_data_addrs_[0];
const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
workspace_addr = workspace_data_addrs.empty() ? nullptr : workspace_data_addrs[0];
}
}
}
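The pattern introduced here, and repeated in the kernel, kernel-ex and memcpy task diffs below, is to gather the task's input/output/workspace addresses once while the OpDesc is still at hand during Init, cache them in io_addrs_, and let UpdateArgs simply forward the cached list to the model. A minimal sketch of the shape of that contract (simplified types, not repository code):

#include <vector>

// Simplified stand-in for the DavinciModel side of the contract.
class ModelSide {
 public:
  void SetTotalIOAddrs(const std::vector<void *> &io_addrs) {
    total_io_addrs_.insert(total_io_addrs_.end(), io_addrs.begin(), io_addrs.end());
  }
 private:
  std::vector<void *> total_io_addrs_;
};

// Simplified stand-in for a task-info class after this change.
class TaskSide {
 public:
  // Called once during Init, while input/output/workspace addresses are resolvable.
  void SetIoAddrs(const std::vector<void *> &inputs, const std::vector<void *> &outputs,
                  const std::vector<void *> &workspaces) {
    io_addrs_.insert(io_addrs_.end(), inputs.begin(), inputs.end());
    io_addrs_.insert(io_addrs_.end(), outputs.begin(), outputs.end());
    io_addrs_.insert(io_addrs_.end(), workspaces.begin(), workspaces.end());
  }
  // Called on every execution; no OpDesc lookup is needed any more.
  void UpdateArgs(ModelSide &model) { model.SetTotalIOAddrs(io_addrs_); }
 private:
  std::vector<void *> io_addrs_;
};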


ge/graph/load/new_model_manager/task_info/hccl_task_info.h (+2, -6)

@@ -35,7 +35,6 @@ class HcclTaskInfo : public TaskInfo {
ops_kernel_store_(nullptr),
private_def_(nullptr),
private_def_len_(0),
op_desc_(nullptr),
args_(nullptr),
args_offset_(0) {}

@@ -52,7 +51,7 @@ class HcclTaskInfo : public TaskInfo {
Status UpdateArgs() override;

private:
ge::Status SetAddrs(const std::string &hccl_type, const std::shared_ptr<OpDesc> &op);
void SetIoAddrs(const OpDescPtr &op_desc);


Status SetAddrs(const std::shared_ptr<OpDesc> &op_desc, std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos);

@@ -76,10 +75,7 @@ class HcclTaskInfo : public TaskInfo {
uint32_t private_def_len_;
static std::mutex hccl_follow_stream_mutex_;
vector<GETaskKernelHcclInfo> kernel_hccl_infos_;
vector<void *> input_data_addrs_;
vector<void *> output_data_addrs_;
vector<void *> workspace_data_addrs_;
OpDescPtr op_desc_;
vector<void *> io_addrs_;
void *args_;
uint32_t args_offset_;
};


ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.cc (+21, -23)

@@ -30,11 +30,7 @@
namespace ge {
Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GELOGI("KernelExTaskInfo Init Start.");
if (davinci_model == nullptr) {
GELOGE(PARAM_INVALID, "davinci_model is null!");
return PARAM_INVALID;
}

GE_CHECK_NOTNULL(davinci_model);
davinci_model_ = davinci_model;
Status ret = SetStream(task_def.stream_id(), davinci_model_->GetStreamList());
if (ret != SUCCESS) {
@@ -51,7 +47,6 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
GELOGE(INTERNAL_ERROR, "Init aicpu task info error, index is out of range!");
return INTERNAL_ERROR;
}
op_desc_ = op_desc;

// 2. Reconstruct kernelExDef.args to STR_FWK_OP_KERNEL
STR_FWK_OP_KERNEL fwk_op_kernel = {0};
@@ -79,8 +74,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
return RT_ERROR_TO_GE_STATUS(rt_ret);)
}


GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc_->GetName().c_str(),
op_desc_->GetType().c_str(), ext_info.size(), ext_info_addr_);
GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), ext_info.size(), ext_info_addr_);


// 2.1 get loop cond variable for tensor array write
uint64_t step_id_addr = 0;
@@ -132,6 +127,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

SetIoAddrs(op_desc);
GELOGI("KernelExTaskInfo knonw node Init Success.");
return SUCCESS;
}
@@ -195,7 +191,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
return RT_ERROR_TO_GE_STATUS(rt_ret);)

davinci_model_->SetZeroCopyAddr(op_desc, io_addrs, io_addrs.data(), input_output_addr_, addrs_size, 0);
SetIoAddrs(op_desc);
GELOGI("KernelExTaskInfo Init Success. session id: %lu", session_id);
return SUCCESS;
}
@@ -236,36 +232,38 @@ Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciMod
return SUCCESS;
}


Status KernelExTaskInfo::UpdateArgs() {
GELOGI("KernelExTaskInfo::UpdateArgs in.");
void KernelExTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
vector<void *> io_addrs;
if (!op_desc_->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs.insert(io_addrs.end(), output_data_addrs.begin(), output_data_addrs.end());
vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
if (!op_desc->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
} else {
string peer_input_name;
if (AttrUtils::GetStr(op_desc_, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
if (AttrUtils::GetStr(op_desc, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
uint32_t output_index = davinci_model_->GetFixedAddrOutputIndex(peer_input_name);
if (output_index > output_data_addrs.size()) {
GELOGE(FAILED, "The output data addr size[%zu] and output index[%u] are inconsistent.",
output_data_addrs.size(), output_index);
return FAILED;
return;
}
io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
for (size_t i = 0; i < output_data_addrs.size(); ++i) {
if (i == output_index) {
void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
io_addrs.emplace_back(fixed_addr);
io_addrs_.emplace_back(fixed_addr);
continue;
}
io_addrs.emplace_back(output_data_addrs[i]);
io_addrs_.emplace_back(output_data_addrs[i]);
}
}
}
davinci_model_->SetTotalIOAddrs(io_addrs);
}

Status KernelExTaskInfo::UpdateArgs() {
GELOGI("KernelExTaskInfo::UpdateArgs in.");
davinci_model_->SetTotalIOAddrs(io_addrs_);
GELOGI("KernelExTaskInfo::UpdateArgs success.");
return SUCCESS;
}
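SetIoAddrs above also keeps the fixed-address special case: when the op carries ATTR_DYNAMIC_SHAPE_FIXED_ADDR, the output at the resolved index is replaced by the pre-allocated fixed address while everything else is cached as-is. A small illustrative sketch of that substitution rule (hypothetical helper, not repository code):

#include <cstddef>
#include <vector>

// Hypothetical helper mirroring the substitution done in KernelExTaskInfo::SetIoAddrs:
// cache inputs verbatim, and outputs verbatim except for the one index that must
// point at a pre-allocated fixed address.
std::vector<void *> BuildIoAddrs(const std::vector<void *> &inputs,
                                 const std::vector<void *> &outputs,
                                 bool has_fixed_addr, std::size_t fixed_index, void *fixed_addr) {
  std::vector<void *> io(inputs.begin(), inputs.end());
  if (!has_fixed_addr) {
    io.insert(io.end(), outputs.begin(), outputs.end());
    return io;
  }
  for (std::size_t i = 0; i < outputs.size(); ++i) {
    io.push_back(i == fixed_index ? fixed_addr : outputs[i]);
  }
  return io;
}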


ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.h (+2, -1)

@@ -59,6 +59,7 @@ class KernelExTaskInfo : public TaskInfo {
};
private:
Status CopyTaskInfo(const domi::KernelExDef &kernel_def, const RuntimeParam &rts_param, const OpDescPtr &op_desc);
void SetIoAddrs(const OpDescPtr &op_desc);

uint32_t task_id_;
uint32_t stream_id_;
@@ -69,7 +70,7 @@ class KernelExTaskInfo : public TaskInfo {
void *input_output_addr_;
void *ext_info_addr_;
void *dump_args_;
OpDescPtr op_desc_ = nullptr;
vector<void *> io_addrs_;
uint32_t args_offset_ = 0;
int64_t fixed_addr_offset_ = 0;
};


ge/graph/load/new_model_manager/task_info/kernel_task_info.cc (+75, -63)

@@ -53,9 +53,6 @@ const int kArgsAttrHandle = 4;
} // namespace

namespace ge {
KernelTaskInfo::SuperKernelTaskInfo KernelTaskInfo::skt_info_ = {
0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, kInvalidGroupKey, 0, nullptr};

Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GE_CHECK_NOTNULL(davinci_model);
davinci_model_ = davinci_model;
@@ -137,6 +134,7 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
ret = InitCceTask(kernel_def);
}

SetIoAddrs(op_desc_);
GELOGD("KernelTaskInfo init finish, result=%u.", ret);
return ret;
}
@@ -148,9 +146,10 @@ Status KernelTaskInfo::SaveSKTDumpInfo() {
return SUCCESS;
}
// all op in super kernel share one taskid and streamid
for (size_t i = 0; i < skt_info_.op_desc_list.size(); i++) {
davinci_model_->SaveDumpTask(skt_info_.last_task_id, skt_info_.last_stream_id, skt_info_.op_desc_list[i],
skt_info_.dump_args_list[i]);
const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
for (size_t i = 0; i < skt_info.op_desc_list.size(); i++) {
davinci_model_->SaveDumpTask(skt_info.last_task_id, skt_info.last_stream_id, skt_info.op_desc_list[i],
skt_info.dump_args_list[i]);
}
return SUCCESS;
}
@@ -164,9 +163,10 @@ void KernelTaskInfo::UpdateSKTTaskId() {
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return;
}
skt_info_.last_task_id = task_id;
skt_info_.last_stream_id = stream_id;
skt_id_ = skt_info_.last_task_id;
SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
skt_info.last_task_id = task_id;
skt_info.last_stream_id = stream_id;
skt_id_ = skt_info.last_task_id;


GELOGI("UpdateTaskId:UpdateSKTTaskId [%u],stream id [%u]", task_id, stream_id);
}
@@ -191,23 +191,25 @@ Status KernelTaskInfo::SKTFinalize() {
UpdateSKTTaskId();
GE_CHK_STATUS_RET(SaveSKTDumpInfo(), "skt save dump info failed");
GELOGI("SuperKernel Distribute [skt_id:%u]", skt_id_);
skt_info_.kernel_list.clear();
skt_info_.arg_list.clear();
skt_info_.dump_flag_list.clear();
skt_info_.op_desc_list.clear();
skt_info_.dump_args_list.clear();
skt_info_.last_stream = nullptr;
skt_info_.last_block_dim = 0;
skt_info_.last_sm_desc = sm_desc_;
skt_info_.last_group_key = kInvalidGroupKey;
skt_info_.last_dump_flag = RT_KERNEL_DEFAULT;
skt_info_.last_dump_args = 0;
skt_info_.last_op = nullptr;
SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
skt_info.kernel_list.clear();
skt_info.arg_list.clear();
skt_info.dump_flag_list.clear();
skt_info.op_desc_list.clear();
skt_info.dump_args_list.clear();
skt_info.last_stream = nullptr;
skt_info.last_block_dim = 0;
skt_info.last_sm_desc = sm_desc_;
skt_info.last_group_key = kInvalidGroupKey;
skt_info.last_dump_flag = RT_KERNEL_DEFAULT;
skt_info.last_dump_args = 0;
skt_info.last_op = nullptr;
return SUCCESS;
}

uint32_t KernelTaskInfo::GetDumpFlag() {
for (auto flag : skt_info_.dump_flag_list) {
const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
for (auto flag : skt_info.dump_flag_list) {
if (flag == RT_KERNEL_DUMPFLAG) {
return RT_KERNEL_DUMPFLAG;
}
@@ -216,19 +218,20 @@ uint32_t KernelTaskInfo::GetDumpFlag() {
}

Status KernelTaskInfo::SuperKernelLaunch() {
if (skt_info_.kernel_list.empty()) {
const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
if (skt_info.kernel_list.empty()) {
GELOGI("SuperKernelLaunch: Skt_kernel_list has no task, just return");
return SUCCESS;
}
rtError_t rt_ret;
auto &skt_kernel_list = skt_info_.kernel_list;
auto &skt_arg_list = skt_info_.arg_list;
auto &skt_kernel_list = skt_info.kernel_list;
auto &skt_arg_list = skt_info.arg_list;
GELOGI("SuperKernelLaunch: Skt_kernel_list size[%zu] skt_arg_list[%zu]", skt_kernel_list.size(), skt_arg_list.size());
if (skt_kernel_list.size() == kSKTSingleSize && skt_arg_list.size() == kSKTSingleSize) {
rt_ret = rtKernelLaunchWithFlag(skt_info_.kernel_list[0], static_cast<uint32_t>(skt_info_.last_block_dim),
skt_info_.arg_list[0], skt_info_.last_args_size,
static_cast<rtSmDesc_t *>(skt_info_.last_sm_desc), skt_info_.last_stream,
skt_info_.last_dump_flag);
rt_ret = rtKernelLaunchWithFlag(skt_info.kernel_list[0], static_cast<uint32_t>(skt_info.last_block_dim),
skt_info.arg_list[0], skt_info.last_args_size,
static_cast<rtSmDesc_t *>(skt_info.last_sm_desc), skt_info.last_stream,
skt_info.last_dump_flag);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "SuperKernelLaunch: Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
@@ -247,14 +250,14 @@ Status KernelTaskInfo::SuperKernelLaunch() {
}
// Call the fuse API
std::unique_ptr<skt::SuperKernel> superKernel = nullptr;
ge_ret = factory->FuseKernels(skt_kernel_list, skt_arg_list, skt_info_.last_block_dim, superKernel);
ge_ret = factory->FuseKernels(skt_kernel_list, skt_arg_list, skt_info.last_block_dim, superKernel);
if (ge_ret != SUCCESS) {
GELOGE(ge_ret, "SuperKernelLaunch: fuse call failed");
return ge_ret;
}
// Launch a super kernel
skt_dump_flag_ = GetDumpFlag();
ge_ret = superKernel->Launch(skt_info_.last_stream, skt_dump_flag_);
ge_ret = superKernel->Launch(skt_info.last_stream, skt_dump_flag_);
if (ge_ret != SUCCESS) {
GELOGE(ge_ret, "SuperKernelLaunch: launch failed");
return ge_ret;
@@ -269,23 +272,26 @@ }
}

Status KernelTaskInfo::SaveSuperKernelInfo() {
skt_info_.kernel_list.push_back(stub_func_);
skt_info_.arg_list.push_back(args_);
skt_info_.last_stream = stream_;
skt_info_.last_block_dim = block_dim_;
skt_info_.last_args_size = args_size_;
skt_info_.last_sm_desc = sm_desc_;
skt_info_.last_dump_flag = dump_flag_;
skt_info_.dump_flag_list.push_back(dump_flag_);
skt_info_.op_desc_list.push_back(op_desc_);
skt_info_.dump_args_list.push_back(reinterpret_cast<uintptr_t>(skt_dump_args_));
skt_info_.last_group_key = group_key_;
skt_info_.last_dump_args = reinterpret_cast<uintptr_t>(skt_dump_args_);
skt_info_.last_op = op_desc_;
SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
skt_info.kernel_list.push_back(stub_func_);
skt_info.arg_list.push_back(args_);
skt_info.last_stream = stream_;
skt_info.last_block_dim = block_dim_;
skt_info.last_args_size = args_size_;
skt_info.last_sm_desc = sm_desc_;
skt_info.last_dump_flag = dump_flag_;
skt_info.dump_flag_list.push_back(dump_flag_);
skt_info.op_desc_list.push_back(op_desc_);
skt_info.dump_args_list.push_back(reinterpret_cast<uintptr_t>(skt_dump_args_));
skt_info.last_group_key = group_key_;
skt_info.last_dump_args = reinterpret_cast<uintptr_t>(skt_dump_args_);
skt_info.last_op = op_desc_;
// last node in a stream, just launch
if (IsMarkedLastNode()) {
return SuperKernelLaunch();
}

GELOGI("Save Current task [block_dim:%u, size:%zu].", block_dim_, skt_info.kernel_list.size());
return SUCCESS;
}
@@ -322,8 +328,9 @@ bool KernelTaskInfo::IsMarkedFirstNode() {
// then may be saved to skt task list; else
// call skt launch those saved tasks before
bool KernelTaskInfo::FirstCallSKTLaunchCheck() {
return ((block_dim_ != skt_info_.last_block_dim) || (stream_ != skt_info_.last_stream) ||
(has_group_key_ && (group_key_ != skt_info_.last_group_key)));
const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
return ((block_dim_ != skt_info.last_block_dim) || (stream_ != skt_info.last_stream) ||
(has_group_key_ && (group_key_ != skt_info.last_group_key)));
}

// current task has group_id or has n ATTR_N_BATCH_SPLIT then save it to skt task list; else
@@ -362,7 +369,6 @@ Status KernelTaskInfo::SuperKernelDistribute() {
GELOGE(ret, "Call SuperKernelLaunch failed!");
return ret;
}
GELOGI("Save Current task [block_dim:%u, size:%zu].", block_dim_, skt_info_.kernel_list.size());
}
return SUCCESS;
}
@@ -391,10 +397,11 @@ Status KernelTaskInfo::Distribute() {
call_save_dump_ = true;
} else {
/* default: not skt launch */
const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
GELOGD(
"KernelTaskInfo Distribute Start, sktenable:%d taskid:%u sktid:%u last_sktid:%u stubfunc_name:%s "
"stubfunc:%p blockdim:%u stream:%p",
call_skt, task_id_, skt_id_, skt_info_.last_task_id, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
call_skt, task_id_, skt_id_, skt_info.last_task_id, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
// l1 fusion enable and env flag open (kCloseSkt for skt debug)
bool open_dump = false;
auto all_dump_model = davinci_model_->GetDumpProperties().GetAllDumpModel();
@@ -422,23 +429,29 @@ Status KernelTaskInfo::Distribute() {
"KernelTaskInfo Distribute Success. sktenable:%d taskid:%d sktid:%d stubfunc_name:%s stubfunc:%p "
"blockdim:%d stream:%p",
call_skt, task_id_, skt_id_, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
op_desc_.reset(); // Not hold OpDesc after distribute.
return SUCCESS;
}


Status KernelTaskInfo::UpdateArgs() {
GELOGI("KernelTaskInfo::UpdateArgs in.");
void KernelTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);


vector<void *> io_addrs;
io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs.insert(io_addrs.end(), output_data_addrs.begin(), output_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
if (kernel_type_ == ccKernelType::TE) {
vector<void *> workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc_);
io_addrs.insert(io_addrs.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
davinci_model_->SetTotalIOAddrs(io_addrs);
vector<void *> workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
io_addrs_.insert(io_addrs_.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
}
}

Status KernelTaskInfo::UpdateArgs() {
GELOGI("KernelTaskInfo::UpdateArgs in.");
if (kernel_type_ == ccKernelType::TE) {
davinci_model_->SetTotalIOAddrs(io_addrs_);
} else if (kernel_type_ == ccKernelType::AI_CPU || kernel_type_ == ccKernelType::CUST_AI_CPU) {
vector<void *> io_addrs = io_addrs_;
davinci_model_->UpdateKnownZeroCopyAddr(io_addrs);
uintptr_t io_addr = reinterpret_cast<uintptr_t>(args_addr.get()) + sizeof(aicpu::AicpuParamHead);
auto addrs_size = sizeof(uint64_t) * io_addrs.size();
@@ -789,7 +802,6 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
GELOGE(FAILED, "flowtable is null.");
return FAILED;
}
flowtable_size_ = flowtable.size();
}

// get smDesc stored in model
@@ -854,14 +866,14 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
GELOGI("Do InitAicpuTask");
so_name_ = kernel_def.so_name();
kernel_name_ = kernel_def.kernel_name();
GELOGI("node[%s] test so name %s, kernel name %s", op_desc_->GetName().c_str(), so_name_.c_str(),
kernel_name_.c_str());


OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index);
if (op_desc == nullptr) {
GELOGE(INTERNAL_ERROR, "index is out of range, index: %u", op_index);
return INTERNAL_ERROR;
}
GELOGI("node[%s] test so name %s, kernel name %s", op_desc->GetName().c_str(), so_name_.c_str(),
kernel_name_.c_str());


if (kernel_type_ == ccKernelType::CUST_AI_CPU) {
bool loaded = false;
@@ -885,8 +897,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
GELOGE(init_ret, "Init aicpu task ext info failed, ext_info size=%zu", ext_info.size());
return init_ret;
}
GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, aicpu_ext_info_addr_=%p", op_desc_->GetName().c_str(),
op_desc_->GetType().c_str(), ext_info.size(), aicpu_ext_info_addr_);
GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, aicpu_ext_info_addr_=%p", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), ext_info.size(), aicpu_ext_info_addr_);


aicpu_param_head->extInfoAddr = reinterpret_cast<uintptr_t>(aicpu_ext_info_addr_);
aicpu_param_head->extInfoLength = static_cast<uintptr_t>(ext_info.size());
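In this file, UpdateArgs now branches on the kernel type: TE kernels forward the cached io_addrs_ straight to the model, while AI_CPU / CUST_AI_CPU kernels first take a copy, remap it for zero copy, and patch the addresses into the argument blob. A compact sketch of that branch (simplified, not repository code; the zero-copy remap itself is elided):

#include <cstddef>
#include <cstdint>
#include <vector>

enum class KernelKind { kTe, kAiCpu };

// Sketch of the two UpdateArgs paths after this change.
void UpdateArgsSketch(KernelKind kind, const std::vector<void *> &cached_io,
                      std::vector<void *> &model_total_io, std::vector<uint64_t> &args_blob) {
  if (kind == KernelKind::kTe) {
    // TE: the model collects the cached addresses directly.
    model_total_io.insert(model_total_io.end(), cached_io.begin(), cached_io.end());
    return;
  }
  // AI_CPU: remap a copy (zero-copy translation elided here), then write the
  // addresses into the flat argument buffer that follows the parameter head.
  std::vector<void *> io = cached_io;
  args_blob.resize(io.size());
  for (std::size_t i = 0; i < io.size(); ++i) {
    args_blob[i] = reinterpret_cast<uintptr_t>(io[i]);
  }
}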


ge/graph/load/new_model_manager/task_info/kernel_task_info.h (+3, -22)

@@ -38,7 +38,6 @@ class KernelTaskInfo : public TaskInfo {
flowtable_(nullptr),
block_dim_(0),
args_size_(0),
flowtable_size_(0),
task_id_(0),
stream_id_(0),
so_name_(""),
@@ -128,6 +127,7 @@ class KernelTaskInfo : public TaskInfo {


Status SuperKernelDistribute();
bool IsL1FusionOp(const OpDescPtr &op_desc);
void SetIoAddrs(const OpDescPtr &op_desc);

// For super kernel
Status SaveSKTDumpInfo();
@@ -148,7 +148,6 @@ class KernelTaskInfo : public TaskInfo {
void *flowtable_;
uint32_t block_dim_;
uint32_t args_size_;
uint32_t flowtable_size_;
uint32_t task_id_;
uint32_t stream_id_;
std::string so_name_;
@@ -156,7 +155,8 @@ class KernelTaskInfo : public TaskInfo {
ccKernelType kernel_type_;
uint32_t dump_flag_;
void *dump_args_;
OpDescPtr op_desc_;
OpDescPtr op_desc_; // Clear after distribute.
vector<void *> io_addrs_;
DavinciModel *davinci_model_;
uint32_t args_offset_ = 0;
uint32_t hybrid_args_offset_ = 0;
@@ -186,25 +186,6 @@ class KernelTaskInfo : public TaskInfo {
void *output_addrs = nullptr;
void *attr_handle = nullptr;
} custom_info_;

// For super kernel
static struct SuperKernelTaskInfo {
uint32_t last_block_dim;
uint32_t last_args_size;
uint32_t last_task_id;
uint32_t last_stream_id;
void *last_stream;
void *last_sm_desc;
std::vector<void *> kernel_list;
std::vector<void *> arg_list;
std::vector<uint32_t> dump_flag_list;
std::vector<OpDescPtr> op_desc_list;
std::vector<uintptr_t> dump_args_list;
uint32_t last_dump_flag;
int64_t last_group_key;
uintptr_t last_dump_args;
OpDescPtr last_op;
} skt_info_;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_KERNEL_TASK_INFO_H_

ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.cc (+33, -29)

@@ -30,14 +30,13 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
return ret;
}


memcpy_async_ = task_def.memcpy_async();
count_ = memcpy_async_.count();
kind_ = memcpy_async_.kind();
dst_max_ = memcpy_async_.dst_max();
OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async_.op_index());
op_desc_ = op_desc;
const domi::MemcpyAsyncDef &memcpy_async = task_def.memcpy_async();
count_ = memcpy_async.count();
kind_ = memcpy_async.kind();
dst_max_ = memcpy_async.dst_max();
OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async.op_index());
if (op_desc == nullptr) {
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async_.op_index());
GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async.op_index());
return INTERNAL_ERROR;
}

@@ -46,13 +45,14 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
dst_ = reinterpret_cast<uint8_t *>(reinterpret_cast<uintptr_t>(src_) + sizeof(void *));
// for zero copy
kind_ = RT_MEMCPY_ADDR_DEVICE_TO_DEVICE;
GE_CHK_STATUS_RET(SetIoAddrs(op_desc, memcpy_async), "Set addrs failed");
GELOGI("MemcpyAsyncTaskInfo op name %s, src_ %p, dst_ %p, args_offset %u.",
op_desc->GetName().c_str(), src_, dst_, args_offset_);
return SUCCESS;
}

const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
ret = ModelUtils::GetRtAddress(rts_param, memcpy_async_.src(), src_);
ret = ModelUtils::GetRtAddress(rts_param, memcpy_async.src(), src_);
if (ret != SUCCESS) {
return ret;
}
@@ -61,23 +61,23 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
vector<int64_t> memory_type_list;
(void)AttrUtils::GetListInt(op_desc, ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type_list);
if (!memory_type_list.empty() && memory_type_list[0] == RT_MEMORY_TS_4G) { // TS Feature, Just one.
uint64_t mem_offset = memcpy_async_.dst() - rts_param.logic_mem_base;
dst_ = static_cast<uint8_t *>(rts_param.ts_mem_mall->Acquire(mem_offset, memcpy_async_.dst_max()));
uint64_t mem_offset = memcpy_async.dst() - rts_param.logic_mem_base;
dst_ = static_cast<uint8_t *>(rts_param.ts_mem_mall->Acquire(mem_offset, memcpy_async.dst_max()));
if (dst_ == nullptr) {
return FAILED;
}
} else {
ret = ModelUtils::GetRtAddress(rts_param, memcpy_async_.dst(), dst_);
ret = ModelUtils::GetRtAddress(rts_param, memcpy_async.dst(), dst_);
if (ret != SUCCESS) {
return ret;
}
}


GELOGI("MemcpyAsyncTaskInfo Init Success, logic[0x%lx, 0x%lx], src:%p, dst:%p, max:%lu, count:%lu",
memcpy_async_.src(), memcpy_async_.dst(), src_, dst_, dst_max_, count_);

davinci_model_->DisableZeroCopy(src_);
davinci_model_->DisableZeroCopy(dst_);
GE_CHK_STATUS_RET(SetIoAddrs(op_desc, memcpy_async), "Set addrs failed");
GELOGI("MemcpyAsyncTaskInfo Init Success, logic[0x%lx, 0x%lx], src:%p, dst:%p, max:%lu, count:%lu",
memcpy_async.src(), memcpy_async.dst(), src_, dst_, dst_max_, count_);
return SUCCESS;
}

@@ -115,29 +115,33 @@ Status MemcpyAsyncTaskInfo::CalculateArgs(const domi::TaskDef &task_def, Davinci
return SUCCESS;
}


Status MemcpyAsyncTaskInfo::UpdateArgs() {
GELOGI("MemcpyAsyncTaskInfo::UpdateArgs in.");
GE_CHECK_NOTNULL(davinci_model_);
Status ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async_.src(), src_);
if (ret != SUCCESS) {
return ret;
}

ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async_.dst(), dst_);
Status MemcpyAsyncTaskInfo::SetIoAddrs(const OpDescPtr &op_desc, const domi::MemcpyAsyncDef &memcpy_async) {
uint8_t *src = nullptr;
Status ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async.src(), src);
if (ret != SUCCESS) {
return ret;
}
io_addrs_.emplace_back(reinterpret_cast<void *>(src));


vector<void *> io_addrs;
io_addrs.emplace_back(reinterpret_cast<void *>(src_));
if (op_desc_->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
if (op_desc->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
io_addrs.emplace_back(fixed_addr);
io_addrs_.emplace_back(fixed_addr);
} else {
io_addrs.emplace_back(reinterpret_cast<void *>(dst_));
uint8_t *dst = nullptr;
ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async.dst(), dst);
if (ret != SUCCESS) {
return ret;
}
io_addrs_.emplace_back(reinterpret_cast<void *>(dst));
}
davinci_model_->SetTotalIOAddrs(io_addrs);


return SUCCESS;
}

Status MemcpyAsyncTaskInfo::UpdateArgs() {
GELOGI("MemcpyAsyncTaskInfo::UpdateArgs in.");
GE_CHECK_NOTNULL(davinci_model_);
davinci_model_->SetTotalIOAddrs(io_addrs_);
GELOGI("MemcpyAsyncTaskInfo::UpdateArgs success.");
return SUCCESS;
}
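Here too, the src/dst resolution that UpdateArgs used to repeat on every run is done once in SetIoAddrs, which resolves the logical addresses and caches the results in io_addrs_. A tiny sketch of that one-time resolution (hypothetical resolver standing in for ModelUtils::GetRtAddress; not repository code):

#include <cstdint>
#include <vector>

// Hypothetical resolver: turn a logical offset into a runtime pointer inside the
// model's memory block.
bool ResolveLogicalAddr(uint64_t logic_addr, uint8_t *mem_base, uint64_t mem_size, uint8_t *&out) {
  if (logic_addr >= mem_size) {
    return false;  // outside the model's memory range
  }
  out = mem_base + logic_addr;
  return true;
}

// Resolve src and dst once and cache them, as SetIoAddrs now does at Init time.
bool CacheMemcpyAddrs(uint64_t logic_src, uint64_t logic_dst, uint8_t *mem_base,
                      uint64_t mem_size, std::vector<void *> &io_addrs) {
  uint8_t *src = nullptr;
  uint8_t *dst = nullptr;
  if (!ResolveLogicalAddr(logic_src, mem_base, mem_size, src) ||
      !ResolveLogicalAddr(logic_dst, mem_base, mem_size, dst)) {
    return false;
  }
  io_addrs.emplace_back(src);
  io_addrs.emplace_back(dst);
  return true;
}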


ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.h (+3, -2)

@@ -39,16 +39,17 @@ class MemcpyAsyncTaskInfo : public TaskInfo {
Status CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) override;

private:
Status SetIoAddrs(const OpDescPtr &op_desc, const domi::MemcpyAsyncDef &memcpy_async);

uint8_t *dst_;
uint64_t dst_max_;
uint8_t *src_;
uint64_t count_;
uint32_t kind_;
OpDescPtr op_desc_;
vector<void *> io_addrs_;
int64_t fixed_addr_offset_;
DavinciModel *davinci_model_ = nullptr;
uint32_t args_offset_ = 0;
domi::MemcpyAsyncDef memcpy_async_;
};
} // namespace ge
#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_MEMCPY_ASYNC_TASK_INFO_H_

tests/ut/ge/CMakeLists.txt (+8, -2)

@@ -329,7 +329,7 @@ set(COMMON_FORMAT_SRC_FILES
"${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc"
"${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc"
"${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc"
"${GE_CODE_DIR}/ge/common/formats/utils/formats_trans_utils.cc"
"${GE_CODE_DIR}/ge/common/formats/utils/formats_trans_utils.cc"
)

set(GRAPH_OPTIMIZE_COMMON_SRC_FILES
@@ -388,6 +388,7 @@ set(DISTINCT_GRAPH_LOAD_SRC_FILES
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/kernel_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/label_set_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/memcpy_addr_async_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/profiler_trace_task_info.cc"
"${GE_CODE_DIR}/ge/graph/load/new_model_manager/task_info/stream_active_task_info.cc"
@@ -565,6 +566,11 @@ set(DISTINCT_GRAPH_LOAD_TEST_FILES
"graph/load/new_model_manager_event_manager_unittest.cc"
#"graph/load/output_net_output_unittest.cc"
"graph/load/tbe_handle_store_unittest.cc"
"graph/load/hccl_task_info_unittest.cc"
"graph/load/kernel_ex_task_info_unittest.cc"
"graph/load/kernel_task_info_unittest.cc"
"graph/load/memcpy_addr_async_task_info_unittest.cc"
"graph/load/memcpy_async_task_info_unittest.cc"
#"graph/graph_load_unittest.cc"
"graph/ge_executor_unittest.cc"
)
@@ -918,7 +924,7 @@ target_compile_definitions(ut_libge_distinct_load_utest PRIVATE
google=ascend_private
)


target_link_libraries(ut_libge_distinct_load_utest
target_link_libraries(ut_libge_distinct_load_utest
${COMMON_SHARED_LIBRARIES}
$<BUILD_INTERFACE:intf_pub>
ge_execute_common ge_ut_common_format ge_load_common


tests/ut/ge/graph/load/hccl_task_info_unittest.cc (+134, -0)

@@ -0,0 +1,134 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#define private public
#define protected public

#include "graph/load/new_model_manager/davinci_model.h"
#include "graph/load/new_model_manager/task_info/hccl_task_info.h"

namespace ge {
class UtestHcclTaskInfo : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};


// test success GetTaskID
TEST_F(UtestHcclTaskInfo, success_get_task_id) {
domi::ModelTaskDef model_task_def;
domi::TaskDef *task = model_task_def.add_task();
task->set_type(RT_MODEL_TASK_KERNEL);
TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task->type()));

EXPECT_EQ(task_info->GetTaskID(), 0);

HcclTaskInfo hccl_task_info;
EXPECT_EQ(hccl_task_info.GetTaskID(), 0);
}

// test init EventRecordTaskInfo
TEST_F(UtestHcclTaskInfo, success_create_stream) {
DavinciModel model(0, nullptr);

HcclTaskInfo hccl_task_info;
EXPECT_EQ(hccl_task_info.CreateStream(3, &model, 0), SUCCESS);
}

// test hccl_Distribute
TEST_F(UtestHcclTaskInfo, success_distribute7) {
DavinciModel model(0, nullptr);

domi::ModelTaskDef model_task_def;
domi::TaskDef *task7 = model_task_def.add_task();
task7->set_type(RT_MODEL_TASK_HCCL);
TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));
Status ret = task_info7->Init(task7[0], &model);
EXPECT_EQ(FAILED, ret);

std::vector<TaskInfoPtr> task_list;
task_list.push_back(task_info7);
model.task_list_ = task_list;

EXPECT_EQ(task_info7->Release(), SUCCESS);
}

// test hccl_Distribute
TEST_F(UtestHcclTaskInfo, success_distribute7_with_hccl_type) {
DavinciModel model(0, nullptr);
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_ = { stream };

domi::TaskDef task_def;
HcclTaskInfo hccl_task_info;
EXPECT_EQ(hccl_task_info.Init(task_def, nullptr), PARAM_INVALID);


domi::KernelHcclDef *kernel_hccl_def = task_def.mutable_kernel_hccl();
kernel_hccl_def->set_op_index(0);
kernel_hccl_def->set_hccl_type("HcomBroadcast");
model.op_list_[0] = std::make_shared<OpDesc>("FrameworkOp", "FrameworkOp");
EXPECT_EQ(hccl_task_info.Init(task_def, &model), SUCCESS);

task_def.clear_kernel_hccl();
}

// test hccl_GetPrivateDefByTaskDef
TEST_F(UtestHcclTaskInfo, success_hccl_get_private_def_by_task_def) {
DavinciModel model(0, nullptr);

domi::ModelTaskDef model_task_def;
domi::TaskDef *task7 = model_task_def.add_task();
task7->set_type(RT_MODEL_TASK_HCCL);
// for SetStream
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
// for GetPrivateDefByTaskDef
task7->set_ops_kernel_store_ptr(10);
std::string value = "hccl_task";
task7->set_private_def(value);

TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));
// Init with the private_def settings above is expected to fail (PARAM_INVALID)
EXPECT_EQ(task_info7->Init(task7[0], &model), PARAM_INVALID);

EXPECT_EQ(task_info7->Release(), SUCCESS);
}

// test hccl_task_TransToGETaskInfo
TEST_F(UtestHcclTaskInfo, success_hccl_trans_to_ge_task_info) {
DavinciModel model(0, nullptr);

domi::ModelTaskDef model_task_def;
domi::TaskDef *task7 = model_task_def.add_task();
// for type
task7->set_type(RT_MODEL_TASK_HCCL);
TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task7->type()));

GETaskInfo ge_task;
HcclTaskInfo hccl_task_info;
hccl_task_info.TransToGETaskInfo(ge_task);

EXPECT_EQ(task_info7->Release(), SUCCESS);
}

} // namespace ge

+ 144
- 0
tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc View File

@@ -0,0 +1,144 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#define private public
#define protected public

#include "graph/load/new_model_manager/davinci_model.h"

#include "graph/load/new_model_manager/task_info/kernel_ex_task_info.h"
#include "cce/aicpu_engine_struct.h"

namespace ge {
extern OpDescPtr CreateOpDesc(string name, string type);

class UtestKernelExTaskInfo : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};

// test kernel_ex_task_Init
TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_init) {
domi::TaskDef task_def;
KernelExTaskInfo kernel_ex_task_info;
EXPECT_EQ(kernel_ex_task_info.Init(task_def, nullptr), PARAM_INVALID);

DavinciModel model(0, nullptr);
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);

rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex();
kernel_ex_def->set_op_index(1);
model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), INTERNAL_ERROR);

kernel_ex_def->clear_op_index();
kernel_ex_def->set_op_index(0);
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);

kernel_ex_def->set_task_info("KernelEx");
kernel_ex_def->set_task_info_size(1);
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);


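// Args sized exactly to sizeof(STR_FWK_OP_KERNEL), plus a variable op and workspace.
// Init is still expected to fail: runtime_param_.mem_base is never set in this test (the copy test below sets it and succeeds).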
constexpr uint32_t arg_size = sizeof(STR_FWK_OP_KERNEL);
string value1(arg_size, 'a');
kernel_ex_def->set_args_size(arg_size);
kernel_ex_def->set_args(value1);
OpDescPtr v_op_desc = CreateOpDesc("ge_global_step", "Variable");
model.variable_op_list_.push_back(v_op_desc);
model.op_list_[0]->SetWorkspace({100331008}); // offset
model.op_list_[0]->SetWorkspaceBytes({150}); // length
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED);


task_def.clear_kernel_ex();
}

// test kernel_ex_task_Release
TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_release) {
KernelExTaskInfo kernel_ex_task_info;
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);

kernel_ex_task_info.kernel_buf_ = nullptr;
rtMalloc(&kernel_ex_task_info.input_output_addr_, 64, RT_MEMORY_HBM);
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);

kernel_ex_task_info.input_output_addr_ = nullptr;
rtMalloc(&kernel_ex_task_info.kernel_buf_, 64, RT_MEMORY_HBM);
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);

rtMalloc(&kernel_ex_task_info.kernel_buf_, 64, RT_MEMORY_HBM);
rtMalloc(&kernel_ex_task_info.input_output_addr_, 64, RT_MEMORY_HBM);
EXPECT_EQ(kernel_ex_task_info.Release(), SUCCESS);
}

// test kernel_ex_task_info_copy
TEST_F(UtestKernelExTaskInfo, success_kernel_ex_task_info_copy) {
DavinciModel model(0, nullptr);
model.runtime_param_.mem_base = (uint8_t *)0x12345;
model.runtime_param_.mem_size = 100332000;
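// Fake non-null memory base with a size large enough that workspace offset 100331008 (+150 bytes) stays in range.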

rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);

domi::TaskDef task_def;
KernelExTaskInfo kernel_ex_task_info;

domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex();
kernel_ex_def->set_task_info_size(150);
kernel_ex_def->set_op_index(0);
model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");

EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED); // workspace empty.

model.op_list_[0]->SetWorkspace({100331008}); // offset
model.op_list_[0]->SetWorkspaceBytes({0}); // length
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED); // workspace addr is null.

model.op_list_[0]->SetWorkspace({100331008}); // offset
model.op_list_[0]->SetWorkspaceBytes({10}); // length
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), FAILED); // workspace addr is small.

model.op_list_[0]->SetWorkspace({100331008}); // offset
model.op_list_[0]->SetWorkspaceBytes({150}); // length
EXPECT_EQ(kernel_ex_task_info.Init(task_def, &model), SUCCESS);

task_def.clear_kernel_ex();
model.runtime_param_.mem_base = nullptr;
}

TEST_F(UtestKernelExTaskInfo, kernel_ex_task_info_calculate_args) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;
domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex();
kernel_ex_def->set_op_index(0);
model.op_list_[0] = CreateOpDesc("FrameworkOp", "FrameworkOp");

AttrUtils::SetStr(model.op_list_[0], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree");

KernelExTaskInfo kernel_ex_task_info;
EXPECT_EQ(kernel_ex_task_info.CalculateArgs(task_def, &model), FAILED);
}

} // namespace ge

+ 1199
- 0
tests/ut/ge/graph/load/kernel_task_info_unittest.cc
File diff suppressed because it is too large
View File


+ 138
- 0
tests/ut/ge/graph/load/memcpy_addr_async_task_info_unittest.cc View File

@@ -0,0 +1,138 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#define private public
#define protected public

#include "graph/load/new_model_manager/davinci_model.h"
#include "graph/load/new_model_manager/task_info/memcpy_addr_async_task_info.h"

namespace ge {
class UtestMemcpyAddrAsyncTaskInfo : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};

extern OpDescPtr CreateOpDesc(string name, string type);

TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_addr_async_task_init) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;
task_def.set_stream_id(0);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_ADDR_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;
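// Logic address ranges: feature map [0x8003000, 0x8008000), weights [0x8008000, 0x800e000), variables [0x800e000, 0x800f000).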

// DavinciModel is null
MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info;
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, nullptr), PARAM_INVALID);

// SetStream failed.
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), FAILED);

// GetOpByIndex src failed
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), INTERNAL_ERROR);

// GetRuntimeAddress src failed.
model.op_list_[6] = CreateOpDesc("memcpyaddrasync", MEMCPYADDRASYNC);
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID);

// GetRuntimeAddress dst failed.
memcpy_async->set_src(0x08003000);
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID);

memcpy_async->set_dst(0x08008000);
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), SUCCESS);

task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_async_task_init_failed) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;

task_def.set_stream_id(0);
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_ADDR_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;


GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYADDRASYNC);
model.op_list_[6]->AddInputDesc(tensor);
model.op_list_[6]->AddOutputDesc(tensor);
model.op_list_[6]->SetInputOffset({1024});
model.op_list_[6]->SetOutputOffset({5120});

// Init is expected to fail: src/dst logic address 10 falls outside the configured ranges
MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info;
EXPECT_EQ(memcpy_addr_async_task_info.Init(task_def, &model), PARAM_INVALID);

task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAddrAsyncTaskInfo, success_memcpy_async_calculate_args) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(0x08003000);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(0x08008000);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(0);

// CalculateArgs is expected to succeed
MemcpyAddrAsyncTaskInfo memcpy_addr_async_task_info;
EXPECT_EQ(memcpy_addr_async_task_info.CalculateArgs(task_def, &model), SUCCESS);
}

} // namespace ge

+ 273
- 0
tests/ut/ge/graph/load/memcpy_async_task_info_unittest.cc View File

@@ -0,0 +1,273 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#define private public
#define protected public

#include "graph/load/new_model_manager/davinci_model.h"
#include "graph/load/new_model_manager/task_info/memcpy_async_task_info.h"


namespace ge {
class UtestMemcpyAsyncTaskInfo : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
};

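// Shared helper; the other task-info unittests pick this up through an extern declaration.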
OpDescPtr CreateOpDesc(string name = "", string type = "") {
auto op_desc = std::make_shared<OpDesc>(name, type);
op_desc->SetStreamId(0);
op_desc->SetId(0);

AttrUtils::SetFloat(op_desc, ATTR_NAME_ALPHA, 0);
AttrUtils::SetFloat(op_desc, ATTR_NAME_BETA, 0);

op_desc->SetWorkspace({});
op_desc->SetWorkspaceBytes({});
op_desc->SetInputOffset({});
op_desc->SetOutputOffset({});

AttrUtils::SetListStr(op_desc, ATTR_NAME_WEIGHT_NAME, {});
AttrUtils::SetInt(op_desc, POOLING_ATTR_MODE, 0);
AttrUtils::SetInt(op_desc, POOLING_ATTR_PAD_MODE, 0);
AttrUtils::SetInt(op_desc, POOLING_ATTR_DATA_MODE, 0);
AttrUtils::SetInt(op_desc, POOLING_ATTR_CEIL_MODE, 0);
AttrUtils::SetInt(op_desc, POOLING_ATTR_NAN_OPT, 0);
AttrUtils::SetListInt(op_desc, POOLING_ATTR_WINDOW, {});
AttrUtils::SetListInt(op_desc, POOLING_ATTR_PAD, {});
AttrUtils::SetListInt(op_desc, POOLING_ATTR_STRIDE, {});
AttrUtils::SetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, {1, 1});
AttrUtils::SetInt(op_desc, ATTR_NAME_STREAM_SWITCH_COND, 0);
return op_desc;
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_task_init) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;
task_def.set_stream_id(0);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;

MemcpyAsyncTaskInfo memcpy_async_task_info;

// GetOpByIndex src failed
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), INTERNAL_ERROR);

model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
memcpy_async->set_src(0x08008000);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);

// set OpDesc attr
std::vector<int64_t> memory_type = { RT_MEMORY_TS_4G };
AttrUtils::SetListInt(model.op_list_[6], ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type);
GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
model.op_list_[6]->AddInputDesc(tensor);
model.op_list_[6]->AddOutputDesc(tensor);
memcpy_async->set_dst_max(0);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), FAILED);

memcpy_async->set_dst_max(0);
model.op_list_[6]->SetInputOffset({1024});
model.op_list_[6]->SetOutputOffset({5120});
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), FAILED);


task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_task_init_failed) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;
task_def.set_stream_id(0);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;


// DavinciModel is null
MemcpyAsyncTaskInfo memcpy_async_task_info;
EXPECT_EQ(memcpy_async_task_info.Init(task_def, nullptr), PARAM_INVALID);

// Still a null model: same PARAM_INVALID as above
EXPECT_EQ(memcpy_async_task_info.Init(task_def, nullptr), PARAM_INVALID);

// GetOpByIndex failed
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), INTERNAL_ERROR);

model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);
memcpy_async->set_src(0x08008000);

EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);
memcpy_async->set_dst(0x08003000);

// set OpDesc attr
std::vector<int64_t> memory_type = { RT_MEMORY_TS_4G };
AttrUtils::SetListInt(model.op_list_[6], ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type);
memcpy_async->set_dst_max(0);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, nullptr), PARAM_INVALID);
memcpy_async->set_dst_max(512);


GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
model.op_list_[6]->AddInputDesc(tensor);
model.op_list_[6]->AddOutputDesc(tensor);
model.op_list_[6]->SetInputOffset({1024});
model.op_list_[6]->SetOutputOffset({5120});
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);

memcpy_async->set_dst(0x08009000);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);

task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_task_distribute) {
DavinciModel model(0, nullptr);
model.SetKnownNode(true);
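// Known-node (single-op) model: io addresses are meant to be patched later via SetIoAddrs/UpdateArgs rather than resolved from logic offsets at Init time.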
domi::TaskDef task_def;
task_def.set_stream_id(0);

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(10);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(10);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(6);

model.runtime_param_.logic_mem_base = 0x8003000;
model.runtime_param_.logic_weight_base = 0x8008000;
model.runtime_param_.logic_var_base = 0x800e000;
model.runtime_param_.mem_size = 0x5000;
model.runtime_param_.weight_size = 0x6000;
model.runtime_param_.var_size = 0x1000;

MemcpyAsyncTaskInfo memcpy_async_task_info;

// GetOpByIndex src failed
rtStream_t stream = nullptr;
rtStreamCreate(&stream, 0);
model.stream_list_.push_back(stream);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), INTERNAL_ERROR);

model.op_list_[6] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
memcpy_async->set_src(0x08008000);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), PARAM_INVALID);

// set OpDesc attr
AttrUtils::SetStr(model.op_list_[6], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree");
GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
model.op_list_[6]->AddInputDesc(tensor);
model.op_list_[6]->AddOutputDesc(tensor);
memcpy_async->set_dst_max(0);
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);

memcpy_async->set_dst_max(0);
model.op_list_[6]->SetInputOffset({1024});
model.op_list_[6]->SetOutputOffset({5120});
EXPECT_EQ(memcpy_async_task_info.Init(task_def, &model), SUCCESS);


task_def.clear_memcpy_async();
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_distribute) {
DavinciModel model(0, nullptr);
model.ge_model_ = MakeShared<GeModel>();

auto model_task_def = MakeShared<domi::ModelTaskDef>();
domi::TaskDef *task_def = model_task_def->add_task();
task_def->set_type(RT_MODEL_TASK_MEMCPY_ASYNC);
domi::KernelDef *kernel_def = task_def->mutable_kernel();
domi::KernelContext *ctx = kernel_def->mutable_context();
ctx->set_op_index(0);
model.op_list_[0] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task_def->type()));

model.task_list_ = { task_info };
model.ge_model_->SetModelTaskDef(model_task_def);

EXPECT_EQ(model.DistributeTask(), SUCCESS);
EXPECT_EQ(task_info->Distribute(), SUCCESS);
task_info->Release();
}

TEST_F(UtestMemcpyAsyncTaskInfo, success_memcpy_async_calculate_args) {
DavinciModel model(0, nullptr);
domi::TaskDef task_def;

domi::MemcpyAsyncDef *memcpy_async = task_def.mutable_memcpy_async();
memcpy_async->set_dst(0x08003000);
memcpy_async->set_dst_max(512);
memcpy_async->set_src(0x08008000);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(0);

model.op_list_[0] = CreateOpDesc("memcpyasync", MEMCPYASYNC);
AttrUtils::SetStr(model.op_list_[0], ATTR_DYNAMIC_SHAPE_FIXED_ADDR, "Hello Mr Tree");

// CalculateArgs with the fixed-addr attribute set is expected to succeed
MemcpyAsyncTaskInfo memcpy_async_task_info;
EXPECT_EQ(memcpy_async_task_info.CalculateArgs(task_def, &model), SUCCESS);
}

TEST_F(UtestMemcpyAsyncTaskInfo, memcpy_async_update_args) {
DavinciModel model(0, nullptr);

MemcpyAsyncTaskInfo memcpy_async_task_info;
memcpy_async_task_info.davinci_model_ = &model;
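// Exercises UpdateArgs (the counterpart of the SetIoAddrs added in this change); with nothing recorded it is expected to return SUCCESS.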

EXPECT_EQ(memcpy_async_task_info.UpdateArgs(), SUCCESS);
}

} // namespace ge
