From: @zhangxiaokun9
Reviewed-by: @xchu42, @ji_chen
Signed-off-by: @ji_chen
tags/v1.2.0
@@ -139,6 +139,7 @@ DavinciModel::DavinciModel(int32_t priority, const std::shared_ptr<ModelListener
       is_l1_fusion_enable_(false),
       is_first_execute_(true) {
   op_list_.clear();
+  skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
 }
 DavinciModel::~DavinciModel() {
@@ -261,6 +262,7 @@ Status DavinciModel::Assign(const GeModelPtr &ge_model) {
 /// @return: void
 ///
 void DavinciModel::Shrink() {
+  skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
   ge_model_.reset();  // delete object.
 }
@@ -76,6 +76,25 @@ struct timeInfo {
   int64_t dumpEndTime;
 };
+// For super kernel
+struct SuperKernelTaskInfo {
+  uint32_t last_block_dim;
+  uint32_t last_args_size;
+  uint32_t last_task_id;
+  uint32_t last_stream_id;
+  void *last_stream;
+  void *last_sm_desc;
+  std::vector<void *> kernel_list;
+  std::vector<void *> arg_list;
+  std::vector<uint32_t> dump_flag_list;
+  std::vector<OpDescPtr> op_desc_list;
+  std::vector<uintptr_t> dump_args_list;
+  uint32_t last_dump_flag;
+  int64_t last_group_key;
+  uintptr_t last_dump_args;
+  OpDescPtr last_op;
+};
 struct TaskMemInfo {
   int64_t input_size{0};
   int64_t output_size{0};
@@ -204,13 +223,14 @@ class DavinciModel {
   // get total mem size
   size_t TotalMemSize() const { return runtime_param_.mem_size; }
-  const std::map<uint32_t, MemInfo> &P2PMemInfos() const {return runtime_param_.memory_infos;}
+  const std::map<uint32_t, MemInfo> &P2PMemInfos() const { return runtime_param_.memory_infos; }
   // model name
   string Name() const { return name_; }
   // om_name
   string OmName() const { return om_name_; }
   // version
   uint32_t Version() const { return version_; }
@@ -255,12 +275,16 @@ class DavinciModel {
     }
     return nullptr;
   }
   // get task info for profiling
   const std::vector<TaskDescInfo> &GetTaskDescInfo() const { return task_desc_info_; }
   // get updated task info list
   std::vector<TaskInfoPtr> GetTaskList() { return task_list_; }
+  // Modified from KernelTaskInfo.
+  SuperKernelTaskInfo &GetSuperKernelTaskInfo() { return skt_info_; }
   ///
   /// @ingroup ge
   /// @brief get model input and output format
@@ -610,7 +634,7 @@ class DavinciModel {
   uint8_t *MallocWeightsMem(size_t weights_size);
-  uint8_t* MallocP2PMem(size_t p2p_data_size);
+  uint8_t *MallocP2PMem(size_t p2p_data_size);
   void FreeFeatureMapMem();
@@ -996,6 +1020,9 @@ class DavinciModel {
   std::multimap<uint32_t, uint32_t> op_id_map_;
   std::vector<ProfileInfo> profile_list_;
+  // For super kernel.
+  SuperKernelTaskInfo skt_info_;
 };
 }  // namespace ge
 #endif  // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_DAVINCI_MODEL_H_
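The header change above replaces the static SuperKernelTaskInfo that used to live in KernelTaskInfo with a per-model member that the constructor and Shrink() reset by aggregate assignment, and that tasks reach through GetSuperKernelTaskInfo(). A minimal, self-contained sketch of that ownership pattern (SktState and Model are simplified stand-ins, not the real GE declarations):

#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for the SuperKernelTaskInfo struct introduced above (fields trimmed).
struct SktState {
  uint32_t last_block_dim;
  uint32_t last_task_id;
  std::vector<void *> kernel_list;
  int64_t last_group_key;
};

// Stand-in for DavinciModel: it owns one SktState and exposes a mutable reference,
// mirroring GetSuperKernelTaskInfo() in the header above.
class Model {
 public:
  Model() { skt_info_ = {0, 0, {}, -1}; }        // aggregate reset, as in the constructor diff
  void Shrink() { skt_info_ = {0, 0, {}, -1}; }  // and as in DavinciModel::Shrink()
  SktState &GetSuperKernelTaskInfo() { return skt_info_; }

 private:
  SktState skt_info_;  // per-model state, no longer a process-wide static
};

int main() {
  Model a;
  Model b;
  a.GetSuperKernelTaskInfo().kernel_list.push_back(nullptr);   // a task of model "a" accumulates here
  std::cout << a.GetSuperKernelTaskInfo().kernel_list.size()   // 1
            << " " << b.GetSuperKernelTaskInfo().kernel_list.size() << "\n";  // 0: model "b" unaffected
  a.Shrink();
  std::cout << a.GetSuperKernelTaskInfo().kernel_list.size() << "\n";  // 0 after the reset
  return 0;
}

Keeping the state in the model means two models loaded in one process each accumulate their own pending super-kernel list instead of sharing a process-wide static.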
@@ -59,40 +59,40 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
   GELOGI("HcclTaskInfo Init, op_index is: %u", op_index);
   // Get HCCL op
-  op_desc_ = davinci_model->GetOpByIndex(op_index);
-  GE_CHECK_NOTNULL(op_desc_);
+  const auto op_desc = davinci_model_->GetOpByIndex(op_index);
+  GE_CHECK_NOTNULL(op_desc);
   // Create the kernel hccl infos
-  CreateKernelHcclInfo(op_desc_);
+  CreateKernelHcclInfo(op_desc);
   // Initialize the hccl_type of all kernel hccl info
   HcomOmeUtil::GetHcclType(task_def, kernel_hccl_infos_);
   // Only in Horovod scenario should get the inputName and GeShape
-  ret = HcomOmeUtil::GetHorovodInputs(op_desc_, kernel_hccl_infos_);
+  ret = HcomOmeUtil::GetHorovodInputs(op_desc, kernel_hccl_infos_);
   if (ret != SUCCESS) {
     GELOGE(ret, "davinci_model: GetHorovodInputs fail! domi error: %u", ret);
     return ret;
   }
-  Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc_, kernel_hccl_infos_);
+  Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc, kernel_hccl_infos_);
   if (dmrt != SUCCESS) {
     GELOGE(dmrt, "davinci_model: GetHcomDataType fail! domi error: %u", dmrt);
     return dmrt;
   }
-  dmrt = HcomOmeUtil::GetHcclCount(op_desc_, kernel_hccl_infos_);
+  dmrt = HcomOmeUtil::GetHcclCount(op_desc, kernel_hccl_infos_);
   if (dmrt != SUCCESS) {
     GELOGE(dmrt, "davinci_model: GetHcomCount fail! domi error: %u", dmrt);
     return dmrt;
   }
   // Only HCOMBROADCAST and HVDCALLBACKBROADCAST need to get the rootId
-  dmrt = HcomOmeUtil::GetAllRootId(op_desc_, kernel_hccl_infos_);
+  dmrt = HcomOmeUtil::GetAllRootId(op_desc, kernel_hccl_infos_);
   if (dmrt != SUCCESS) {
     GELOGE(dmrt, "davinci_model: Get rootId fail! domi error: %u", dmrt);
     return dmrt;
   }
   // GE's new process: hccl declares the number of streams required, creates a stream by GE, and sends it to hccl
-  ret = SetFollowStream(op_desc_, davinci_model);
+  ret = SetFollowStream(op_desc, davinci_model);
   if (ret != SUCCESS) {
     GELOGE(ret, "SetStream Fail.");
     return ret;
@@ -100,21 +100,28 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
   if (davinci_model_->IsKnownNode()) {
     args_ = davinci_model_->GetCurrentArgsAddr(args_offset_);
-    GELOGI("Known node %s args addr %p, offset %u.", op_desc_->GetName().c_str(), args_, args_offset_);
+    GELOGI("Known node %s args addr %p, offset %u.", op_desc->GetName().c_str(), args_, args_offset_);
   }
-  ret = SetAddrs(op_desc_, kernel_hccl_infos_);
+  ret = SetAddrs(op_desc, kernel_hccl_infos_);
   if (ret != SUCCESS) {
     GELOGE(ret, "Setaddrs Fail.");
     return ret;
   }
   // GE's new process: hccl declares the need for Workspace size, and GE allocates Workspace
-  ret = SetWorkspace(op_desc_, kernel_hccl_infos_);
+  ret = SetWorkspace(op_desc, kernel_hccl_infos_);
   if (ret != SUCCESS) {
     GELOGE(ret, "SetWorkspace Fail.");
     return ret;
   }
+  const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
+  const auto input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
+  const auto output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
+  const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
+  io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
+  io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
+  io_addrs_.insert(io_addrs_.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
   GELOGI("HcclTaskInfo Init Success");
   return SUCCESS;
 }
@@ -231,18 +238,7 @@ Status HcclTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *
 Status HcclTaskInfo::UpdateArgs() {
   GELOGI("HcclTaskInfo::UpdateArgs in.");
-  const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
-  input_data_addrs_ = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
-  output_data_addrs_ = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
-  workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc_);
-  vector<void *> io_addrs;
-  io_addrs.insert(io_addrs.end(), input_data_addrs_.begin(), input_data_addrs_.end());
-  io_addrs.insert(io_addrs.end(), output_data_addrs_.begin(), output_data_addrs_.end());
-  io_addrs.insert(io_addrs.end(), workspace_data_addrs_.begin(), workspace_data_addrs_.end());
-  davinci_model_->SetTotalIOAddrs(io_addrs);
+  davinci_model_->SetTotalIOAddrs(io_addrs_);
   GELOGI("HcclTaskInfo::UpdateArgs success.");
   return SUCCESS;
 }
@@ -261,9 +257,11 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
   HcclReduceOp op_type = HCCL_REDUCE_SUM;
   GE_CHECK_NOTNULL(davinci_model_);
   GELOGI("Calc opType[%s] input address before. Node name[%s]", op_desc->GetType().c_str(), op_desc->GetName().c_str());
+  vector<void *> input_data_addrs;
+  vector<void *> output_data_addrs;
   if (!davinci_model_->IsKnownNode()) {
-    input_data_addrs_ = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
-    output_data_addrs_ = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
+    input_data_addrs = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
+    output_data_addrs = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
   }
   void *input_data_addr = nullptr;
   void *output_data_addr = nullptr;
@@ -275,8 +273,8 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
       output_data_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() + i);
       GELOGI("Hccl task info known input addr %p, output addr %p.", input_data_addr, output_data_addr);
     } else {
-      input_data_addr = input_data_addrs_.empty() ? nullptr : input_data_addrs_[i];
-      output_data_addr = output_data_addrs_.empty() ? nullptr : output_data_addrs_[i];
+      input_data_addr = input_data_addrs.empty() ? nullptr : input_data_addrs[i];
+      output_data_addr = output_data_addrs.empty() ? nullptr : output_data_addrs[i];
     }
     kernel_hccl_infos[i].inputDataAddr = input_data_addr;
     if (hccl_type == HCOMALLGATHER || hccl_type == HCOMRECEIVE || hccl_type == HVDCALLBACKALLGATHER) {
@@ -366,8 +364,8 @@ Status HcclTaskInfo::SetWorkspace(const std::shared_ptr<OpDesc> &op_desc,
       workspace_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() +
                                                 op_desc->GetOutputsSize());
     } else {
-      workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
-      workspace_addr = workspace_data_addrs_.empty() ? nullptr : workspace_data_addrs_[0];
+      const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
+      workspace_addr = workspace_data_addrs.empty() ? nullptr : workspace_data_addrs[0];
     }
   }
 }
@@ -35,7 +35,6 @@ class HcclTaskInfo : public TaskInfo {
         ops_kernel_store_(nullptr),
         private_def_(nullptr),
         private_def_len_(0),
-        op_desc_(nullptr),
         args_(nullptr),
         args_offset_(0) {}
@@ -76,10 +75,7 @@ class HcclTaskInfo : public TaskInfo {
   uint32_t private_def_len_;
   static std::mutex hccl_follow_stream_mutex_;
   vector<GETaskKernelHcclInfo> kernel_hccl_infos_;
-  vector<void *> input_data_addrs_;
-  vector<void *> output_data_addrs_;
-  vector<void *> workspace_data_addrs_;
-  OpDescPtr op_desc_;
+  vector<void *> io_addrs_;
   void *args_;
   uint32_t args_offset_;
 };
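The HcclTaskInfo change above resolves input, output, and workspace addresses once in Init(), caches them in io_addrs_, and reduces UpdateArgs() to handing that cached vector to the model; the removed op_desc_ and per-call address members are no longer needed. A small self-contained sketch of the caching pattern, with FakeModel and FakeHcclTask as illustrative stand-ins rather than GE classes:

#include <cassert>
#include <cstddef>
#include <vector>

class FakeModel {
 public:
  void SetTotalIOAddrs(const std::vector<void *> &addrs) {
    total_.insert(total_.end(), addrs.begin(), addrs.end());
  }
  size_t TotalCount() const { return total_.size(); }

 private:
  std::vector<void *> total_;
};

class FakeHcclTask {
 public:
  // Init resolves the input/output/workspace addresses once and caches them.
  void Init(FakeModel &model, const std::vector<void *> &in, const std::vector<void *> &out,
            const std::vector<void *> &ws) {
    model_ = &model;
    io_addrs_.insert(io_addrs_.end(), in.begin(), in.end());
    io_addrs_.insert(io_addrs_.end(), out.begin(), out.end());
    io_addrs_.insert(io_addrs_.end(), ws.begin(), ws.end());
  }
  // UpdateArgs no longer recomputes anything; it just forwards the cached list.
  void UpdateArgs() { model_->SetTotalIOAddrs(io_addrs_); }

 private:
  FakeModel *model_ = nullptr;
  std::vector<void *> io_addrs_;
};

int main() {
  FakeModel model;
  FakeHcclTask task;
  int x = 0, y = 0;
  task.Init(model, {&x}, {&y}, {});
  task.UpdateArgs();
  assert(model.TotalCount() == 2);  // one input plus one output address forwarded
  return 0;
}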
@@ -30,11 +30,7 @@
 namespace ge {
 Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
   GELOGI("KernelExTaskInfo Init Start.");
-  if (davinci_model == nullptr) {
-    GELOGE(PARAM_INVALID, "davinci_model is null!");
-    return PARAM_INVALID;
-  }
+  GE_CHECK_NOTNULL(davinci_model);
   davinci_model_ = davinci_model;
   Status ret = SetStream(task_def.stream_id(), davinci_model_->GetStreamList());
   if (ret != SUCCESS) {
@@ -51,7 +47,6 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
     GELOGE(INTERNAL_ERROR, "Init aicpu task info error, index is out of range!");
     return INTERNAL_ERROR;
   }
-  op_desc_ = op_desc;
   // 2. Reconstruct kernelExDef.args to STR_FWK_OP_KERNEL
   STR_FWK_OP_KERNEL fwk_op_kernel = {0};
@@ -79,8 +74,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
                     return RT_ERROR_TO_GE_STATUS(rt_ret);)
   }
-  GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc_->GetName().c_str(),
-         op_desc_->GetType().c_str(), ext_info.size(), ext_info_addr_);
+  GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc->GetName().c_str(),
+         op_desc->GetType().c_str(), ext_info.size(), ext_info_addr_);
   // 2.1 get loop cond variable for tensor array write
   uint64_t step_id_addr = 0;
@@ -133,7 +128,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
                     return RT_ERROR_TO_GE_STATUS(rt_ret);)
     GELOGI("KernelExTaskInfo knonw node Init Success.");
-    return SUCCESS;
+    return SetIoAddrs(op_desc);
   }
   // 3. Set workspaceaddr, inputOutputDataAddr
@@ -197,7 +192,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
   davinci_model_->SetZeroCopyAddr(op_desc, io_addrs, io_addrs.data(), input_output_addr_, addrs_size, 0);
   GELOGI("KernelExTaskInfo Init Success. session id: %lu", session_id);
-  return SUCCESS;
+  return SetIoAddrs(op_desc);
 }
 Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
@@ -236,36 +231,40 @@ Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciMod
   return SUCCESS;
 }
-Status KernelExTaskInfo::UpdateArgs() {
-  GELOGI("KernelExTaskInfo::UpdateArgs in.");
+Status KernelExTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
   const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
-  vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
-  vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
-  vector<void *> io_addrs;
-  if (!op_desc_->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
-    io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
-    io_addrs.insert(io_addrs.end(), output_data_addrs.begin(), output_data_addrs.end());
+  vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
+  vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
+  if (!op_desc->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
+    io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
+    io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
   } else {
     string peer_input_name;
-    if (AttrUtils::GetStr(op_desc_, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
+    if (AttrUtils::GetStr(op_desc, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
       uint32_t output_index = davinci_model_->GetFixedAddrOutputIndex(peer_input_name);
       if (output_index > output_data_addrs.size()) {
        GELOGE(FAILED, "The output data addr size[%zu] and output index[%u] are inconsistent.",
              output_data_addrs.size(), output_index);
        return FAILED;
      }
-      io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
+      io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
      for (size_t i = 0; i < output_data_addrs.size(); ++i) {
        if (i == output_index) {
          void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
-          io_addrs.emplace_back(fixed_addr);
+          io_addrs_.emplace_back(fixed_addr);
          continue;
        }
-        io_addrs.emplace_back(output_data_addrs[i]);
+        io_addrs_.emplace_back(output_data_addrs[i]);
      }
    }
  }
-  davinci_model_->SetTotalIOAddrs(io_addrs);
+  return SUCCESS;
+}
+Status KernelExTaskInfo::UpdateArgs() {
+  GELOGI("KernelExTaskInfo::UpdateArgs in.");
+  davinci_model_->SetTotalIOAddrs(io_addrs_);
   GELOGI("KernelExTaskInfo::UpdateArgs success.");
   return SUCCESS;
 }
@@ -59,6 +59,7 @@ class KernelExTaskInfo : public TaskInfo {
   };
 private:
   Status CopyTaskInfo(const domi::KernelExDef &kernel_def, const RuntimeParam &rts_param, const OpDescPtr &op_desc);
+  Status SetIoAddrs(const OpDescPtr &op_desc);
   uint32_t task_id_;
   uint32_t stream_id_;
@@ -69,7 +70,7 @@ class KernelExTaskInfo : public TaskInfo {
   void *input_output_addr_;
   void *ext_info_addr_;
   void *dump_args_;
-  OpDescPtr op_desc_ = nullptr;
+  vector<void *> io_addrs_;
   uint32_t args_offset_ = 0;
   int64_t fixed_addr_offset_ = 0;
 };
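KernelExTaskInfo::SetIoAddrs above builds io_addrs_ from the resolved input and output addresses and, when ATTR_DYNAMIC_SHAPE_FIXED_ADDR is set, swaps the output at output_index for a fixed address taken from the model. A short sketch of just that substitution step (BuildIoAddrs is an illustrative helper, not a GE function):

#include <cstdio>
#include <vector>

// Append the inputs as-is, then copy the outputs while redirecting the slot at
// output_index to a pre-allocated fixed address.
std::vector<void *> BuildIoAddrs(const std::vector<void *> &inputs,
                                 const std::vector<void *> &outputs,
                                 size_t output_index, void *fixed_addr) {
  std::vector<void *> io_addrs(inputs.begin(), inputs.end());
  for (size_t i = 0; i < outputs.size(); ++i) {
    io_addrs.push_back(i == output_index ? fixed_addr : outputs[i]);
  }
  return io_addrs;
}

int main() {
  int in0 = 0, out0 = 0, out1 = 0, fixed = 0;
  auto io = BuildIoAddrs({&in0}, {&out0, &out1}, 1, &fixed);
  std::printf("%zu entries, last slot redirected: %d\n", io.size(),
              io.back() == static_cast<void *>(&fixed));  // 3 entries, last slot redirected: 1
  return 0;
}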
@@ -53,9 +53,6 @@ const int kArgsAttrHandle = 4;
 }  // namespace
 namespace ge {
-KernelTaskInfo::SuperKernelTaskInfo KernelTaskInfo::skt_info_ = {
-    0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, kInvalidGroupKey, 0, nullptr};
 Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
   GE_CHECK_NOTNULL(davinci_model);
   davinci_model_ = davinci_model;
@@ -137,6 +134,7 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
     ret = InitCceTask(kernel_def);
   }
+  SetIoAddrs(op_desc_);
   GELOGD("KernelTaskInfo init finish, result=%u.", ret);
   return ret;
 }
@@ -148,9 +146,10 @@ Status KernelTaskInfo::SaveSKTDumpInfo() {
     return SUCCESS;
   }
   // all op in super kernel share one taskid and streamid
-  for (size_t i = 0; i < skt_info_.op_desc_list.size(); i++) {
-    davinci_model_->SaveDumpTask(skt_info_.last_task_id, skt_info_.last_stream_id, skt_info_.op_desc_list[i],
-                                 skt_info_.dump_args_list[i]);
+  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
+  for (size_t i = 0; i < skt_info.op_desc_list.size(); i++) {
+    davinci_model_->SaveDumpTask(skt_info.last_task_id, skt_info.last_stream_id, skt_info.op_desc_list[i],
+                                 skt_info.dump_args_list[i]);
   }
   return SUCCESS;
 }
@@ -164,9 +163,10 @@ void KernelTaskInfo::UpdateSKTTaskId() {
      GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
      return;
    }
-    skt_info_.last_task_id = task_id;
-    skt_info_.last_stream_id = stream_id;
-    skt_id_ = skt_info_.last_task_id;
+    SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
+    skt_info.last_task_id = task_id;
+    skt_info.last_stream_id = stream_id;
+    skt_id_ = skt_info.last_task_id;
    GELOGI("UpdateTaskId:UpdateSKTTaskId [%u],stream id [%u]", task_id, stream_id);
  }
@@ -191,23 +191,25 @@ Status KernelTaskInfo::SKTFinalize() {
   UpdateSKTTaskId();
   GE_CHK_STATUS_RET(SaveSKTDumpInfo(), "skt save dump info failed");
   GELOGI("SuperKernel Distribute [skt_id:%u]", skt_id_);
-  skt_info_.kernel_list.clear();
-  skt_info_.arg_list.clear();
-  skt_info_.dump_flag_list.clear();
-  skt_info_.op_desc_list.clear();
-  skt_info_.dump_args_list.clear();
-  skt_info_.last_stream = nullptr;
-  skt_info_.last_block_dim = 0;
-  skt_info_.last_sm_desc = sm_desc_;
-  skt_info_.last_group_key = kInvalidGroupKey;
-  skt_info_.last_dump_flag = RT_KERNEL_DEFAULT;
-  skt_info_.last_dump_args = 0;
-  skt_info_.last_op = nullptr;
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
+  skt_info.kernel_list.clear();
+  skt_info.arg_list.clear();
+  skt_info.dump_flag_list.clear();
+  skt_info.op_desc_list.clear();
+  skt_info.dump_args_list.clear();
+  skt_info.last_stream = nullptr;
+  skt_info.last_block_dim = 0;
+  skt_info.last_sm_desc = sm_desc_;
+  skt_info.last_group_key = kInvalidGroupKey;
+  skt_info.last_dump_flag = RT_KERNEL_DEFAULT;
+  skt_info.last_dump_args = 0;
+  skt_info.last_op = nullptr;
   return SUCCESS;
 }
 uint32_t KernelTaskInfo::GetDumpFlag() {
-  for (auto flag : skt_info_.dump_flag_list) {
+  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
+  for (auto flag : skt_info.dump_flag_list) {
     if (flag == RT_KERNEL_DUMPFLAG) {
       return RT_KERNEL_DUMPFLAG;
     }
@@ -216,19 +218,20 @@ uint32_t KernelTaskInfo::GetDumpFlag() {
 }
 Status KernelTaskInfo::SuperKernelLaunch() {
-  if (skt_info_.kernel_list.empty()) {
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
+  if (skt_info.kernel_list.empty()) {
     GELOGI("SuperKernelLaunch: Skt_kernel_list has no task, just return");
     return SUCCESS;
   }
   rtError_t rt_ret;
-  auto &skt_kernel_list = skt_info_.kernel_list;
-  auto &skt_arg_list = skt_info_.arg_list;
+  auto &skt_kernel_list = skt_info.kernel_list;
+  auto &skt_arg_list = skt_info.arg_list;
   GELOGI("SuperKernelLaunch: Skt_kernel_list size[%zu] skt_arg_list[%zu]", skt_kernel_list.size(), skt_arg_list.size());
   if (skt_kernel_list.size() == kSKTSingleSize && skt_arg_list.size() == kSKTSingleSize) {
-    rt_ret = rtKernelLaunchWithFlag(skt_info_.kernel_list[0], static_cast<uint32_t>(skt_info_.last_block_dim),
-                                    skt_info_.arg_list[0], skt_info_.last_args_size,
-                                    static_cast<rtSmDesc_t *>(skt_info_.last_sm_desc), skt_info_.last_stream,
-                                    skt_info_.last_dump_flag);
+    rt_ret = rtKernelLaunchWithFlag(skt_info.kernel_list[0], static_cast<uint32_t>(skt_info.last_block_dim),
+                                    skt_info.arg_list[0], skt_info.last_args_size,
+                                    static_cast<rtSmDesc_t *>(skt_info.last_sm_desc), skt_info.last_stream,
+                                    skt_info.last_dump_flag);
     if (rt_ret != RT_ERROR_NONE) {
       GELOGE(RT_FAILED, "SuperKernelLaunch: Call rt api failed, ret: 0x%X", rt_ret);
       return RT_ERROR_TO_GE_STATUS(rt_ret);
@@ -247,14 +250,14 @@ Status KernelTaskInfo::SuperKernelLaunch() {
   }
   // Call the fuse API
   std::unique_ptr<skt::SuperKernel> superKernel = nullptr;
-  ge_ret = factory->FuseKernels(skt_kernel_list, skt_arg_list, skt_info_.last_block_dim, superKernel);
+  ge_ret = factory->FuseKernels(skt_kernel_list, skt_arg_list, skt_info.last_block_dim, superKernel);
   if (ge_ret != SUCCESS) {
     GELOGE(ge_ret, "SuperKernelLaunch: fuse call failed");
     return ge_ret;
   }
   // Launch a super kernel
   skt_dump_flag_ = GetDumpFlag();
-  ge_ret = superKernel->Launch(skt_info_.last_stream, skt_dump_flag_);
+  ge_ret = superKernel->Launch(skt_info.last_stream, skt_dump_flag_);
   if (ge_ret != SUCCESS) {
     GELOGE(ge_ret, "SuperKernelLaunch: launch failed");
     return ge_ret;
@@ -269,23 +272,26 @@ Status KernelTaskInfo::SuperKernelLaunch() {
 }
 Status KernelTaskInfo::SaveSuperKernelInfo() {
-  skt_info_.kernel_list.push_back(stub_func_);
-  skt_info_.arg_list.push_back(args_);
-  skt_info_.last_stream = stream_;
-  skt_info_.last_block_dim = block_dim_;
-  skt_info_.last_args_size = args_size_;
-  skt_info_.last_sm_desc = sm_desc_;
-  skt_info_.last_dump_flag = dump_flag_;
-  skt_info_.dump_flag_list.push_back(dump_flag_);
-  skt_info_.op_desc_list.push_back(op_desc_);
-  skt_info_.dump_args_list.push_back(reinterpret_cast<uintptr_t>(skt_dump_args_));
-  skt_info_.last_group_key = group_key_;
-  skt_info_.last_dump_args = reinterpret_cast<uintptr_t>(skt_dump_args_);
-  skt_info_.last_op = op_desc_;
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
+  skt_info.kernel_list.push_back(stub_func_);
+  skt_info.arg_list.push_back(args_);
+  skt_info.last_stream = stream_;
+  skt_info.last_block_dim = block_dim_;
+  skt_info.last_args_size = args_size_;
+  skt_info.last_sm_desc = sm_desc_;
+  skt_info.last_dump_flag = dump_flag_;
+  skt_info.dump_flag_list.push_back(dump_flag_);
+  skt_info.op_desc_list.push_back(op_desc_);
+  skt_info.dump_args_list.push_back(reinterpret_cast<uintptr_t>(skt_dump_args_));
+  skt_info.last_group_key = group_key_;
+  skt_info.last_dump_args = reinterpret_cast<uintptr_t>(skt_dump_args_);
+  skt_info.last_op = op_desc_;
   // last node in a stream, just launch
   if (IsMarkedLastNode()) {
     return SuperKernelLaunch();
   }
+  GELOGI("Save Current task [block_dim:%u, size:%zu].", block_dim_, skt_info.kernel_list.size());
   return SUCCESS;
 }
@@ -322,8 +328,9 @@ bool KernelTaskInfo::IsMarkedFirstNode() {
 // then may be saved to skt task list; else
 // call skt launch those saved tasks before
 bool KernelTaskInfo::FirstCallSKTLaunchCheck() {
-  return ((block_dim_ != skt_info_.last_block_dim) || (stream_ != skt_info_.last_stream) ||
-          (has_group_key_ && (group_key_ != skt_info_.last_group_key)));
+  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
+  return ((block_dim_ != skt_info.last_block_dim) || (stream_ != skt_info.last_stream) ||
+          (has_group_key_ && (group_key_ != skt_info.last_group_key)));
 }
 // current task has group_id or has n ATTR_N_BATCH_SPLIT then save it to skt task list; else
@@ -362,7 +369,6 @@ Status KernelTaskInfo::SuperKernelDistribute() {
      GELOGE(ret, "Call SuperKernelLaunch failed!");
      return ret;
    }
-    GELOGI("Save Current task [block_dim:%u, size:%zu].", block_dim_, skt_info_.kernel_list.size());
  }
   return SUCCESS;
 }
@@ -391,10 +397,11 @@ Status KernelTaskInfo::Distribute() {
     call_save_dump_ = true;
   } else {
     /* default: not skt launch */
+    const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
    GELOGD(
        "KernelTaskInfo Distribute Start, sktenable:%d taskid:%u sktid:%u last_sktid:%u stubfunc_name:%s "
        "stubfunc:%p blockdim:%u stream:%p",
-        call_skt, task_id_, skt_id_, skt_info_.last_task_id, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
+        call_skt, task_id_, skt_id_, skt_info.last_task_id, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
     // l1 fusion enable and env flag open (kCloseSkt for skt debug)
     bool open_dump = false;
     auto all_dump_model = davinci_model_->GetDumpProperties().GetAllDumpModel();
@@ -422,23 +429,30 @@ Status KernelTaskInfo::Distribute() {
      "KernelTaskInfo Distribute Success. sktenable:%d taskid:%d sktid:%d stubfunc_name:%s stubfunc:%p "
      "blockdim:%d stream:%p",
      call_skt, task_id_, skt_id_, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
+  op_desc_.reset();  // Not hold OpDesc after distribute.
   return SUCCESS;
 }
+void KernelTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
+  const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
+  vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
+  vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
+  io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
+  io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
+  if (kernel_type_ == ccKernelType::TE) {
+    vector<void *> workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
+    io_addrs_.insert(io_addrs_.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
+  }
+}
 Status KernelTaskInfo::UpdateArgs() {
   GELOGI("KernelTaskInfo::UpdateArgs in.");
-  const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
-  vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
-  vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
-  vector<void *> io_addrs;
-  io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
-  io_addrs.insert(io_addrs.end(), output_data_addrs.begin(), output_data_addrs.end());
   if (kernel_type_ == ccKernelType::TE) {
-    vector<void *> workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc_);
-    io_addrs.insert(io_addrs.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
-    davinci_model_->SetTotalIOAddrs(io_addrs);
+    davinci_model_->SetTotalIOAddrs(io_addrs_);
   } else if (kernel_type_ == ccKernelType::AI_CPU || kernel_type_ == ccKernelType::CUST_AI_CPU) {
+    vector<void *> io_addrs = io_addrs_;
     davinci_model_->UpdateKnownZeroCopyAddr(io_addrs);
     uintptr_t io_addr = reinterpret_cast<uintptr_t>(args_addr.get()) + sizeof(aicpu::AicpuParamHead);
     auto addrs_size = sizeof(uint64_t) * io_addrs.size();
@@ -789,7 +803,6 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
      GELOGE(FAILED, "flowtable is null.");
      return FAILED;
    }
-    flowtable_size_ = flowtable.size();
  }
   // get smDesc stored in model
@@ -854,14 +867,14 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
   GELOGI("Do InitAicpuTask");
   so_name_ = kernel_def.so_name();
   kernel_name_ = kernel_def.kernel_name();
-  GELOGI("node[%s] test so name %s, kernel name %s", op_desc_->GetName().c_str(), so_name_.c_str(),
-         kernel_name_.c_str());
   OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index);
   if (op_desc == nullptr) {
     GELOGE(INTERNAL_ERROR, "index is out of range, index: %u", op_index);
     return INTERNAL_ERROR;
   }
+  GELOGI("node[%s] test so name %s, kernel name %s", op_desc->GetName().c_str(), so_name_.c_str(),
+         kernel_name_.c_str());
   if (kernel_type_ == ccKernelType::CUST_AI_CPU) {
     bool loaded = false;
@@ -885,8 +898,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
     GELOGE(init_ret, "Init aicpu task ext info failed, ext_info size=%zu", ext_info.size());
     return init_ret;
   }
-  GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, aicpu_ext_info_addr_=%p", op_desc_->GetName().c_str(),
-         op_desc_->GetType().c_str(), ext_info.size(), aicpu_ext_info_addr_);
+  GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, aicpu_ext_info_addr_=%p", op_desc->GetName().c_str(),
+         op_desc->GetType().c_str(), ext_info.size(), aicpu_ext_info_addr_);
   aicpu_param_head->extInfoAddr = reinterpret_cast<uintptr_t>(aicpu_ext_info_addr_);
   aicpu_param_head->extInfoLength = static_cast<uintptr_t>(ext_info.size());
@@ -38,7 +38,6 @@ class KernelTaskInfo : public TaskInfo {
         flowtable_(nullptr),
         block_dim_(0),
         args_size_(0),
-        flowtable_size_(0),
         task_id_(0),
         stream_id_(0),
         so_name_(""),
@@ -46,7 +45,6 @@ class KernelTaskInfo : public TaskInfo {
         kernel_type_(ccKernelType::CCE_AI_CORE),
         dump_flag_(RT_KERNEL_DEFAULT),
         dump_args_(nullptr),
-        op_desc_(nullptr),
         davinci_model_(nullptr),
         skt_id_(0),
         stub_func_name_(""),
@@ -128,6 +126,7 @@ class KernelTaskInfo : public TaskInfo {
   Status SuperKernelDistribute();
   bool IsL1FusionOp(const OpDescPtr &op_desc);
+  void SetIoAddrs(const OpDescPtr &op_desc);
   // For super kernel
   Status SaveSKTDumpInfo();
@@ -148,7 +147,6 @@ class KernelTaskInfo : public TaskInfo {
   void *flowtable_;
   uint32_t block_dim_;
   uint32_t args_size_;
-  uint32_t flowtable_size_;
   uint32_t task_id_;
   uint32_t stream_id_;
   std::string so_name_;
@@ -156,7 +154,8 @@ class KernelTaskInfo : public TaskInfo {
   ccKernelType kernel_type_;
   uint32_t dump_flag_;
   void *dump_args_;
-  OpDescPtr op_desc_;
+  OpDescPtr op_desc_;  // Clear after distribute.
+  vector<void *> io_addrs_;
   DavinciModel *davinci_model_;
   uint32_t args_offset_ = 0;
   uint32_t hybrid_args_offset_ = 0;
@@ -186,25 +185,6 @@ class KernelTaskInfo : public TaskInfo {
     void *output_addrs = nullptr;
     void *attr_handle = nullptr;
   } custom_info_;
-  // For super kernel
-  static struct SuperKernelTaskInfo {
-    uint32_t last_block_dim;
-    uint32_t last_args_size;
-    uint32_t last_task_id;
-    uint32_t last_stream_id;
-    void *last_stream;
-    void *last_sm_desc;
-    std::vector<void *> kernel_list;
-    std::vector<void *> arg_list;
-    std::vector<uint32_t> dump_flag_list;
-    std::vector<OpDescPtr> op_desc_list;
-    std::vector<uintptr_t> dump_args_list;
-    uint32_t last_dump_flag;
-    int64_t last_group_key;
-    uintptr_t last_dump_args;
-    OpDescPtr last_op;
-  } skt_info_;
 };
 }  // namespace ge
 #endif  // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_KERNEL_TASK_INFO_H_
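With the bookkeeping moved into DavinciModel, FirstCallSKTLaunchCheck() above reads davinci_model_->GetSuperKernelTaskInfo() to decide whether the pending super-kernel batch has to be launched before the current task may be saved: block dim, stream, and (when a group key is present) group key must all match the last saved task. A self-contained sketch of that check, with SktState and NeedFlushBeforeSave as illustrative names:

#include <cstdint>
#include <cstdio>

struct SktState {
  uint32_t last_block_dim = 0;
  void *last_stream = nullptr;
  int64_t last_group_key = -1;  // -1 plays the role of kInvalidGroupKey in the diff
};

// A task may only join the pending batch if it matches what was last saved;
// any mismatch means the batch must be flushed (launched) first.
bool NeedFlushBeforeSave(const SktState &skt, uint32_t block_dim, void *stream,
                         bool has_group_key, int64_t group_key) {
  return (block_dim != skt.last_block_dim) || (stream != skt.last_stream) ||
         (has_group_key && (group_key != skt.last_group_key));
}

int main() {
  SktState skt{32, reinterpret_cast<void *>(0x1), 7};
  std::printf("%d\n", NeedFlushBeforeSave(skt, 32, reinterpret_cast<void *>(0x1), true, 7));  // 0: keep batching
  std::printf("%d\n", NeedFlushBeforeSave(skt, 64, reinterpret_cast<void *>(0x1), true, 7));  // 1: flush first
  return 0;
}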
@@ -30,14 +30,13 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
     return ret;
   }
-  memcpy_async_ = task_def.memcpy_async();
-  count_ = memcpy_async_.count();
-  kind_ = memcpy_async_.kind();
-  dst_max_ = memcpy_async_.dst_max();
-  OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async_.op_index());
-  op_desc_ = op_desc;
+  const domi::MemcpyAsyncDef &memcpy_async = task_def.memcpy_async();
+  count_ = memcpy_async.count();
+  kind_ = memcpy_async.kind();
+  dst_max_ = memcpy_async.dst_max();
+  OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async.op_index());
   if (op_desc == nullptr) {
-    GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async_.op_index());
+    GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async.op_index());
     return INTERNAL_ERROR;
   }
@@ -52,7 +51,7 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
   }
   const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
-  ret = ModelUtils::GetRtAddress(rts_param, memcpy_async_.src(), src_);
+  ret = ModelUtils::GetRtAddress(rts_param, memcpy_async.src(), src_);
   if (ret != SUCCESS) {
     return ret;
   }
@@ -61,23 +60,32 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
   vector<int64_t> memory_type_list;
   (void)AttrUtils::GetListInt(op_desc, ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type_list);
   if (!memory_type_list.empty() && memory_type_list[0] == RT_MEMORY_TS_4G) {  // TS Feature, Just one.
-    uint64_t mem_offset = memcpy_async_.dst() - rts_param.logic_mem_base;
-    dst_ = static_cast<uint8_t *>(rts_param.ts_mem_mall->Acquire(mem_offset, memcpy_async_.dst_max()));
+    uint64_t mem_offset = memcpy_async.dst() - rts_param.logic_mem_base;
+    dst_ = static_cast<uint8_t *>(rts_param.ts_mem_mall->Acquire(mem_offset, memcpy_async.dst_max()));
     if (dst_ == nullptr) {
       return FAILED;
     }
   } else {
-    ret = ModelUtils::GetRtAddress(rts_param, memcpy_async_.dst(), dst_);
+    ret = ModelUtils::GetRtAddress(rts_param, memcpy_async.dst(), dst_);
     if (ret != SUCCESS) {
       return ret;
     }
   }
   GELOGI("MemcpyAsyncTaskInfo Init Success, logic[0x%lx, 0x%lx], src:%p, dst:%p, max:%lu, count:%lu",
-         memcpy_async_.src(), memcpy_async_.dst(), src_, dst_, dst_max_, count_);
+         memcpy_async.src(), memcpy_async.dst(), src_, dst_, dst_max_, count_);
   davinci_model_->DisableZeroCopy(src_);
   davinci_model_->DisableZeroCopy(dst_);
+  io_addrs_.emplace_back(reinterpret_cast<void *>(src_));
+  if (op_desc->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
+    void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
+    io_addrs_.emplace_back(fixed_addr);
+  } else {
+    io_addrs_.emplace_back(reinterpret_cast<void *>(dst_));
+  }
   return SUCCESS;
 }
@@ -118,25 +126,7 @@ Status MemcpyAsyncTaskInfo::CalculateArgs(const domi::TaskDef &task_def, Davinci
 Status MemcpyAsyncTaskInfo::UpdateArgs() {
   GELOGI("MemcpyAsyncTaskInfo::UpdateArgs in.");
   GE_CHECK_NOTNULL(davinci_model_);
-  Status ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async_.src(), src_);
-  if (ret != SUCCESS) {
-    return ret;
-  }
-  ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async_.dst(), dst_);
-  if (ret != SUCCESS) {
-    return ret;
-  }
-  vector<void *> io_addrs;
-  io_addrs.emplace_back(reinterpret_cast<void *>(src_));
-  if (op_desc_->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
-    void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
-    io_addrs.emplace_back(fixed_addr);
-  } else {
-    io_addrs.emplace_back(reinterpret_cast<void *>(dst_));
-  }
-  davinci_model_->SetTotalIOAddrs(io_addrs);
+  davinci_model_->SetTotalIOAddrs(io_addrs_);
   GELOGI("MemcpyAsyncTaskInfo::UpdateArgs success.");
   return SUCCESS;
@@ -44,11 +44,10 @@ class MemcpyAsyncTaskInfo : public TaskInfo {
   uint8_t *src_;
   uint64_t count_;
   uint32_t kind_;
-  OpDescPtr op_desc_;
+  vector<void *> io_addrs_;
   int64_t fixed_addr_offset_;
   DavinciModel *davinci_model_ = nullptr;
   uint32_t args_offset_ = 0;
-  domi::MemcpyAsyncDef memcpy_async_;
 };
 }  // namespace ge
 #endif  // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_MEMCPY_ASYNC_TASK_INFO_H_