@@ -77,7 +77,7 @@ struct timeInfo {
 };
 // For super kernel
-static struct SuperKernelTaskInfo {
+struct SuperKernelTaskInfo {
   uint32_t last_block_dim;
   uint32_t last_args_size;
   uint32_t last_task_id;
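A note on the dropped `static`: in C++ a storage-class specifier binds to declared objects or functions, not to a bare type definition, so `static` on a namespace-scope struct definition has no effect on the type and compilers commonly warn about it. A minimal, self-contained illustration (names invented for this note):

```cpp
// 'static' attaches to a declared object, never to the type itself.
struct SktSketch {            // plain type definition: no storage class applies
  unsigned last_block_dim;
  unsigned last_args_size;
};
static SktSketch g_skt_sketch{};  // internal linkage belongs on the object, if one is wanted
```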
@@ -117,7 +117,7 @@ enum ExecuteMode {
 // comments
 class DavinciModel {
-public:
+ public:
   ///
   /// @ingroup ge
   /// @brief DavinciModel constructor
@@ -283,7 +283,7 @@ public:
   std::vector<TaskInfoPtr> GetTaskList() { return task_list_; }
   // Modified from KernelTaskInfo.
-  SuperKernelTaskInfo &GetSupperKernelTaskInfo() { return skt_info_; }
+  SuperKernelTaskInfo &GetSuperKernelTaskInfo() { return skt_info_; }
   ///
   /// @ingroup ge
@@ -445,7 +445,6 @@ public:
   const RuntimeParam &GetRuntimeParam() { return runtime_param_; }
   int32_t GetDataInputTid() const { return dataInputTid; }
   void SetDataInputTid(int32_t data_input_tid) { dataInputTid = data_input_tid; }
   void DisableZeroCopy(const void *addr);
@@ -484,7 +483,6 @@ public:
   }
   void SetEndGraphId(uint32_t task_id, uint32_t stream_id);
   DavinciModel &operator=(const DavinciModel &model) = delete;
   DavinciModel(const DavinciModel &model) = delete;
@@ -492,46 +490,34 @@ public:
   const map<int64_t, std::vector<rtStream_t>> &GetHcclFolowStream() {
     return main_follow_stream_mapping_;
   }
   void SaveHcclFollowStream(int64_t main_stream_id, rtStream_t stream);
   void InitRuntimeParams();
   Status InitVariableMem();
   void UpdateMemBase(uint8_t *mem_base) {
     runtime_param_.mem_base = mem_base;
     mem_base_ = mem_base;
   }
   void SetTotalArgsSize(uint32_t args_size) { total_args_size_ += args_size; }
   uint32_t GetTotalArgsSize() { return total_args_size_; }
   void *GetCurrentArgsAddr(uint32_t offset) {
     void *cur_args = static_cast<char *>(args_) + offset;
     return cur_args;
   }
   void SetTotalIOAddrs(vector<void *> &io_addrs) {
     total_io_addrs_.insert(total_io_addrs_.end(), io_addrs.begin(), io_addrs.end());
   }
   void SetHybridArgsSize(uint32_t args_size) { total_hybrid_args_size_ += args_size; }
   uint32_t GetHybridArgsSize() {
     return total_hybrid_args_size_;
   }
   void *GetCurrentHybridArgsAddr(uint32_t offset) {
     void *cur_args = static_cast<char *>(hybrid_addrs_) + offset;
     return cur_args;
   }
   void SetTotalFixedAddrsSize(string tensor_name, int64_t fix_addr_size);
   int64_t GetFixedAddrsSize(string tensor_name);
   void *GetCurrentFixedAddr(int64_t offset) const {
     void *cur_addr = static_cast<char *>(fixed_addrs_) + offset;
     return cur_addr;
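The accessors above back a two-phase pattern for known-node graphs: a task first reserves space with SetTotalArgsSize and records its offset, and only after the model allocates args_ does it resolve its slice via GetCurrentArgsAddr. A minimal sketch of that flow under those assumptions; MiniModel/MiniTask and the exact call order are stand-ins invented for this note, not the real GE classes:

```cpp
#include <cstdint>
#include <vector>

// Stand-in for the DavinciModel pieces used in this sketch.
struct MiniModel {
  uint32_t total_args_size = 0;
  std::vector<char> args;                                            // stands in for args_
  void SetTotalArgsSize(uint32_t size) { total_args_size += size; }  // phase 1: accumulate
  uint32_t GetTotalArgsSize() const { return total_args_size; }
  void MallocKnownArgs() { args.resize(total_args_size); }           // allocate once, after sizing
  void *GetCurrentArgsAddr(uint32_t offset) { return args.data() + offset; }  // phase 2
};

// Stand-in for a task that owns args_offset_ / args_size_.
struct MiniTask {
  uint32_t args_offset = 0;
  uint32_t args_size = 64;
  void CalculateArgs(MiniModel &m) {   // reserve a slice and remember where it starts
    args_offset = m.GetTotalArgsSize();
    m.SetTotalArgsSize(args_size);
  }
  void *Init(MiniModel &m) {           // resolve the slice once args is allocated
    return m.GetCurrentArgsAddr(args_offset);
  }
};
```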
@@ -543,42 +529,30 @@ public:
     }
     return UINT32_MAX;
   }
   void SetKnownNode(bool known_node) { known_node_ = known_node; }
   bool IsKnownNode() { return known_node_; }
   Status MallocKnownArgs();
   Status UpdateKnownNodeArgs(const vector<void *> &inputs, const vector<void *> &outputs);
   Status CreateKnownZeroCopyMap(const vector<void *> &inputs, const vector<void *> &outputs);
   Status UpdateKnownZeroCopyAddr(vector<void *> &total_io_addrs);
   void SetKnownNodeAddrNotChanged(bool base_addr_not_changed) { base_addr_not_changed_ = base_addr_not_changed; }
   Status GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info);
   Status GetAllAippInputOutputDims(uint32_t index, std::vector<InputOutputDims> &input_dims,
                                    std::vector<InputOutputDims> &output_dims);
   void SetModelDescVersion(bool is_new_model_desc) { is_new_model_desc_ = is_new_model_desc; }
   // om file name
   void SetOmName(string om_name) { om_name_ = om_name; }
   void SetDumpProperties(const DumpProperties &dump_properties) { data_dumper_.SetDumpProperties(dump_properties); }
   const DumpProperties &GetDumpProperties() const { return data_dumper_.GetDumpProperties(); }
   bool GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const {
     return data_dumper_.GetOpDescInfo(stream_id, task_id, op_desc_info);
   }
   Status InitInputOutputForDynamic(const ComputeGraphPtr &compute_graph);
-private:
+ private:
   // memory address of weights
   uint8_t *weights_mem_base_;
   uint8_t *var_mem_base_;
@@ -753,7 +727,6 @@ private:
   Status InitTbeHandle(const OpDescPtr &op_desc);
   void StoreTbeHandle(const std::string &handle_key);
   void CleanTbeHandle();
   ///
@@ -792,7 +765,6 @@ private:
   /// @return: 0 for success / others for fail
   ///
   Status BindOutputQueue();
   Status CpuModelPrepareOutput(uintptr_t addr, uint32_t size);
   ///
@@ -830,9 +802,7 @@ private:
   Status CpuWaitEndGraph();
   Status BindEnqueue();
   Status CpuModelEnqueue(uint32_t queue_id, uintptr_t out_mbuf);
   ///
   /// @ingroup ge
   /// @brief definiteness queue schedule, repeat run model.
@@ -841,7 +811,6 @@ private:
   Status CpuModelRepeat();
   Status InitEntryTask();
   Status AddHeadStream();
   ///
@@ -869,7 +838,6 @@ private:
   void SetDataDumperArgs(const ComputeGraphPtr &compute_graph);
   Status InitModelProfile();
   Status SinkModelProfile();
   Status SinkTimeProfile(const InputData &current_data);
@@ -878,21 +846,14 @@ private:
                            std::vector<ge::OutputTensorInfo> &outputs);
   void ParseAIPPInfo(std::string in_out_info, InputOutputDims &dims_info);
   void SetLabelForDynamic(const NodePtr &node);
   void ParseDynamicOutShape(const std::vector<std::string> &str_info, std::vector<vector<int64_t>> &vec_info);
   bool IsGetNextSinkDynamic(const OpDescPtr &op_desc);
   void GetAllGearsInfo(const NodePtr &node);
   Status GetGetDynamicDimsNodeInfo(const NodePtr &node);
   Status GetGearAndRealOutSizeInfo(size_t input_count, const NodePtr &node);
   Status GetRealOutputSizeOfMerge(size_t input_index, const NodePtr &merge_node);
   Status GetGearAndRealOutShapeInfo(size_t input_count, const OpDescPtr &op_desc);
   bool is_weight_mem_has_inited_;
@@ -59,40 +59,40 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
   GELOGI("HcclTaskInfo Init, op_index is: %u", op_index);
   // Get HCCL op
-  op_desc_ = davinci_model->GetOpByIndex(op_index);
-  GE_CHECK_NOTNULL(op_desc_);
+  const auto op_desc = davinci_model->GetOpByIndex(op_index);
+  GE_CHECK_NOTNULL(op_desc);
   // Create the kernel hccl infos
-  CreateKernelHcclInfo(op_desc_);
+  CreateKernelHcclInfo(op_desc);
   // Initialize the hccl_type of all kernel hccl info
   HcomOmeUtil::GetHcclType(task_def, kernel_hccl_infos_);
   // Only in Horovod scenario should get the inputName and GeShape
-  ret = HcomOmeUtil::GetHorovodInputs(op_desc_, kernel_hccl_infos_);
+  ret = HcomOmeUtil::GetHorovodInputs(op_desc, kernel_hccl_infos_);
   if (ret != SUCCESS) {
     GELOGE(ret, "davinci_model: GetHorovodInputs fail! domi error: %u", ret);
     return ret;
   }
-  Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc_, kernel_hccl_infos_);
+  Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc, kernel_hccl_infos_);
   if (dmrt != SUCCESS) {
     GELOGE(dmrt, "davinci_model: GetHcomDataType fail! domi error: %u", dmrt);
     return dmrt;
   }
-  dmrt = HcomOmeUtil::GetHcclCount(op_desc_, kernel_hccl_infos_);
+  dmrt = HcomOmeUtil::GetHcclCount(op_desc, kernel_hccl_infos_);
   if (dmrt != SUCCESS) {
     GELOGE(dmrt, "davinci_model: GetHcomCount fail! domi error: %u", dmrt);
     return dmrt;
   }
   // Only HCOMBROADCAST and HVDCALLBACKBROADCAST need to get the rootId
-  dmrt = HcomOmeUtil::GetAllRootId(op_desc_, kernel_hccl_infos_);
+  dmrt = HcomOmeUtil::GetAllRootId(op_desc, kernel_hccl_infos_);
   if (dmrt != SUCCESS) {
     GELOGE(dmrt, "davinci_model: Get rootId fail! domi error: %u", dmrt);
     return dmrt;
   }
   // GE's new process: hccl declares the number of streams required, creates a stream by GE, and sends it to hccl
-  ret = SetFollowStream(op_desc_, davinci_model);
+  ret = SetFollowStream(op_desc, davinci_model);
   if (ret != SUCCESS) {
     GELOGE(ret, "SetStream Fail.");
     return ret;
@@ -100,21 +100,28 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
   if (davinci_model_->IsKnownNode()) {
     args_ = davinci_model_->GetCurrentArgsAddr(args_offset_);
-    GELOGI("Known node %s args addr %p, offset %u.", op_desc_->GetName().c_str(), args_, args_offset_);
+    GELOGI("Known node %s args addr %p, offset %u.", op_desc->GetName().c_str(), args_, args_offset_);
   }
-  ret = SetAddrs(op_desc_, kernel_hccl_infos_);
+  ret = SetAddrs(op_desc, kernel_hccl_infos_);
   if (ret != SUCCESS) {
     GELOGE(ret, "Setaddrs Fail.");
     return ret;
   }
   // GE's new process: hccl declares the need for Workspace size, and GE allocates Workspace
-  ret = SetWorkspace(op_desc_, kernel_hccl_infos_);
+  ret = SetWorkspace(op_desc, kernel_hccl_infos_);
   if (ret != SUCCESS) {
     GELOGE(ret, "SetWorkspace Fail.");
     return ret;
   }
+  const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
+  const auto input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
+  const auto output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
+  const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
+  io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
+  io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
+  io_addrs_.insert(io_addrs_.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
   GELOGI("HcclTaskInfo Init Success");
   return SUCCESS;
 }
@@ -231,18 +238,7 @@ Status HcclTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *
 Status HcclTaskInfo::UpdateArgs() {
   GELOGI("HcclTaskInfo::UpdateArgs in.");
-  const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
-  input_data_addrs_ = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
-  output_data_addrs_ = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
-  workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc_);
-  vector<void *> io_addrs;
-  io_addrs.insert(io_addrs.end(), input_data_addrs_.begin(), input_data_addrs_.end());
-  io_addrs.insert(io_addrs.end(), output_data_addrs_.begin(), output_data_addrs_.end());
-  io_addrs.insert(io_addrs.end(), workspace_data_addrs_.begin(), workspace_data_addrs_.end());
-  davinci_model_->SetTotalIOAddrs(io_addrs);
+  davinci_model_->SetTotalIOAddrs(io_addrs_);
   GELOGI("HcclTaskInfo::UpdateArgs success.");
   return SUCCESS;
 }
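Together with the Init hunk above, this moves address resolution out of UpdateArgs: input, output, and workspace addresses are gathered once at Init time into io_addrs_, and UpdateArgs only forwards the cached vector. A self-contained sketch of the resulting flow (the Mini* types are invented stand-ins; only the append semantics mirror SetTotalIOAddrs shown in the DavinciModel hunk earlier):

```cpp
#include <vector>

struct MiniModel {
  std::vector<void *> total_io_addrs;               // stands in for total_io_addrs_
  void SetTotalIOAddrs(std::vector<void *> &io) {   // append, as in the header above
    total_io_addrs.insert(total_io_addrs.end(), io.begin(), io.end());
  }
};

struct MiniHcclTask {
  std::vector<void *> io_addrs;                     // stands in for io_addrs_
  void Init(const std::vector<void *> &in, const std::vector<void *> &out,
            const std::vector<void *> &ws) {        // resolve once, cache
    io_addrs.insert(io_addrs.end(), in.begin(), in.end());
    io_addrs.insert(io_addrs.end(), out.begin(), out.end());
    io_addrs.insert(io_addrs.end(), ws.begin(), ws.end());
  }
  void UpdateArgs(MiniModel &m) { m.SetTotalIOAddrs(io_addrs); }  // forward the cache only
};
```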
@@ -261,9 +257,11 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
   HcclReduceOp op_type = HCCL_REDUCE_SUM;
   GE_CHECK_NOTNULL(davinci_model_);
   GELOGI("Calc opType[%s] input address before. Node name[%s]", op_desc->GetType().c_str(), op_desc->GetName().c_str());
+  vector<void *> input_data_addrs;
+  vector<void *> output_data_addrs;
   if (!davinci_model_->IsKnownNode()) {
-    input_data_addrs_ = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
-    output_data_addrs_ = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
+    input_data_addrs = ModelUtils::GetInputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
+    output_data_addrs = ModelUtils::GetOutputDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
   }
   void *input_data_addr = nullptr;
   void *output_data_addr = nullptr;
@@ -275,8 +273,8 @@ Status HcclTaskInfo::SetAddrs(const std::shared_ptr<OpDesc> &op_desc,
       output_data_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() + i);
       GELOGI("Hccl task info known input addr %p, output addr %p.", input_data_addr, output_data_addr);
     } else {
-      input_data_addr = input_data_addrs_.empty() ? nullptr : input_data_addrs_[i];
-      output_data_addr = output_data_addrs_.empty() ? nullptr : output_data_addrs_[i];
+      input_data_addr = input_data_addrs.empty() ? nullptr : input_data_addrs[i];
+      output_data_addr = output_data_addrs.empty() ? nullptr : output_data_addrs[i];
     }
     kernel_hccl_infos[i].inputDataAddr = input_data_addr;
     if (hccl_type == HCOMALLGATHER || hccl_type == HCOMRECEIVE || hccl_type == HVDCALLBACKALLGATHER) {
@@ -366,8 +364,8 @@ Status HcclTaskInfo::SetWorkspace(const std::shared_ptr<OpDesc> &op_desc,
       workspace_addr = reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(args_) + op_desc->GetInputsSize() +
                                                 op_desc->GetOutputsSize());
     } else {
-      workspace_data_addrs_ = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
-      workspace_addr = workspace_data_addrs_.empty() ? nullptr : workspace_data_addrs_[0];
+      const auto workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(davinci_model_->GetRuntimeParam(), op_desc);
+      workspace_addr = workspace_data_addrs.empty() ? nullptr : workspace_data_addrs[0];
     }
   }
 }
@@ -76,9 +76,7 @@ class HcclTaskInfo : public TaskInfo {
   uint32_t private_def_len_;
   static std::mutex hccl_follow_stream_mutex_;
   vector<GETaskKernelHcclInfo> kernel_hccl_infos_;
-  vector<void *> input_data_addrs_;
-  vector<void *> output_data_addrs_;
-  vector<void *> workspace_data_addrs_;
+  vector<void *> io_addrs_;
   OpDescPtr op_desc_;
   void *args_;
   uint32_t args_offset_;
@@ -128,7 +128,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
                     return RT_ERROR_TO_GE_STATUS(rt_ret);)
     GELOGI("KernelExTaskInfo knonw node Init Success.");
-    return SUCCESS;
+    return SetIoAddr(op_desc);
   }
   // 3. Set workspaceaddr, inputOutputDataAddr
@@ -192,7 +192,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
   davinci_model_->SetZeroCopyAddr(op_desc, io_addrs, io_addrs.data(), input_output_addr_, addrs_size, 0);
   GELOGI("KernelExTaskInfo Init Success. session id: %lu", session_id);
-  return SUCCESS;
+  return SetIoAddr(op_desc);
 }
 Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
@@ -258,8 +258,10 @@ Status KernelExTaskInfo::SetIoAddr(const OpDescPtr &op_desc) {
       }
     }
   }
   return SUCCESS;
 }
 Status KernelExTaskInfo::UpdateArgs() {
   GELOGI("KernelExTaskInfo::UpdateArgs in.");
   davinci_model_->SetTotalIOAddrs(io_addrs_);
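With both early returns in KernelExTaskInfo::Init now routed through SetIoAddr(op_desc), the io_addrs_ cache that UpdateArgs forwards is populated on every successful Init path. A toy, self-contained illustration of that guarantee, assuming SetIoAddr is what fills io_addrs_ (its body sits largely outside this hunk):

```cpp
#include <cassert>
#include <vector>

struct MiniExTask {
  std::vector<void *> io_addrs_;
  int SetIoAddr() {                  // stands in for SetIoAddr(op_desc)
    io_addrs_.assign(3, nullptr);    // placeholder addresses
    return 0;                        // SUCCESS
  }
  int Init(bool known_node) {
    if (known_node) {
      return SetIoAddr();            // was: return SUCCESS;
    }
    // non-known-node setup elided in this sketch
    return SetIoAddr();              // was: return SUCCESS;
  }
  void UpdateArgs(std::vector<void *> &total) {  // forwards the cache, as above
    total.insert(total.end(), io_addrs_.begin(), io_addrs_.end());
  }
};

int main() {
  std::vector<void *> total;
  MiniExTask task;
  task.Init(/*known_node=*/true);
  task.UpdateArgs(total);
  assert(total.size() == 3);         // io_addrs_ was filled before UpdateArgs ran
  return 0;
}
```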
@@ -146,7 +146,7 @@ Status KernelTaskInfo::SaveSKTDumpInfo() {
     return SUCCESS;
   }
   // all op in super kernel share one taskid and streamid
-  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
   for (size_t i = 0; i < skt_info.op_desc_list.size(); i++) {
     davinci_model_->SaveDumpTask(skt_info.last_task_id, skt_info.last_stream_id, skt_info.op_desc_list[i],
                                  skt_info.dump_args_list[i]);
@@ -163,7 +163,7 @@ void KernelTaskInfo::UpdateSKTTaskId() {
     GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
     return;
   }
-  SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
   skt_info.last_task_id = task_id;
   skt_info.last_stream_id = stream_id;
   skt_id_ = skt_info.last_task_id;
@@ -191,7 +191,7 @@ Status KernelTaskInfo::SKTFinalize() {
   UpdateSKTTaskId();
   GE_CHK_STATUS_RET(SaveSKTDumpInfo(), "skt save dump info failed");
   GELOGI("SuperKernel Distribute [skt_id:%u]", skt_id_);
-  SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
   skt_info.kernel_list.clear();
   skt_info.arg_list.clear();
   skt_info.dump_flag_list.clear();
@@ -208,7 +208,7 @@ Status KernelTaskInfo::SKTFinalize() {
 }
 uint32_t KernelTaskInfo::GetDumpFlag() {
-  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
   for (auto flag : skt_info.dump_flag_list) {
     if (flag == RT_KERNEL_DUMPFLAG) {
       return RT_KERNEL_DUMPFLAG;
@@ -218,7 +218,7 @@ uint32_t KernelTaskInfo::GetDumpFlag() {
 }
 Status KernelTaskInfo::SuperKernelLaunch() {
-  SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
   if (skt_info.kernel_list.empty()) {
     GELOGI("SuperKernelLaunch: Skt_kernel_list has no task, just return");
     return SUCCESS;
@@ -272,7 +272,7 @@ Status KernelTaskInfo::SuperKernelLaunch() {
 }
 Status KernelTaskInfo::SaveSuperKernelInfo() {
-  SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
   skt_info.kernel_list.push_back(stub_func_);
   skt_info.arg_list.push_back(args_);
   skt_info.last_stream = stream_;
@@ -328,7 +328,7 @@ bool KernelTaskInfo::IsMarkedFirstNode() {
 // then may be saved to skt task list; else
 // call skt launch those saved tasks before
 bool KernelTaskInfo::FirstCallSKTLaunchCheck() {
-  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
   return ((block_dim_ != skt_info.last_block_dim) || (stream_ != skt_info.last_stream) ||
           (has_group_key_ && (group_key_ != skt_info.last_group_key)));
 }
@@ -397,7 +397,7 @@ Status KernelTaskInfo::Distribute() {
     call_save_dump_ = true;
   } else {
     /* default: not skt launch */
-    const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+    const SuperKernelTaskInfo &skt_info = davinci_model_->GetSuperKernelTaskInfo();
     GELOGD(
         "KernelTaskInfo Distribute Start, sktenable:%d taskid:%u sktid:%u last_sktid:%u stubfunc_name:%s "
         "stubfunc:%p blockdim:%u stream:%p",
@@ -803,7 +803,6 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
       GELOGE(FAILED, "flowtable is null.");
       return FAILED;
     }
-    flowtable_size_ = flowtable.size();
   }
   // get smDesc stored in model
@@ -899,8 +898,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
     GELOGE(init_ret, "Init aicpu task ext info failed, ext_info size=%zu", ext_info.size());
     return init_ret;
   }
-  GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, aicpu_ext_info_addr_=%p", op_desc_->GetName().c_str(),
-         op_desc_->GetType().c_str(), ext_info.size(), aicpu_ext_info_addr_);
+  GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, aicpu_ext_info_addr_=%p", op_desc->GetName().c_str(),
+         op_desc->GetType().c_str(), ext_info.size(), aicpu_ext_info_addr_);
   aicpu_param_head->extInfoAddr = reinterpret_cast<uintptr_t>(aicpu_ext_info_addr_);
   aicpu_param_head->extInfoLength = static_cast<uintptr_t>(ext_info.size());
@@ -38,7 +38,6 @@ class KernelTaskInfo : public TaskInfo {
         flowtable_(nullptr),
         block_dim_(0),
         args_size_(0),
-        flowtable_size_(0),
         task_id_(0),
         stream_id_(0),
         so_name_(""),
@@ -46,7 +45,6 @@ class KernelTaskInfo : public TaskInfo {
         kernel_type_(ccKernelType::CCE_AI_CORE),
         dump_flag_(RT_KERNEL_DEFAULT),
         dump_args_(nullptr),
-        op_desc_(nullptr),
         davinci_model_(nullptr),
         skt_id_(0),
         stub_func_name_(""),
@@ -149,7 +147,6 @@ class KernelTaskInfo : public TaskInfo {
   void *flowtable_;
   uint32_t block_dim_;
   uint32_t args_size_;
-  uint32_t flowtable_size_;
   uint32_t task_id_;
   uint32_t stream_id_;
   std::string so_name_;
@@ -35,7 +35,6 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
   kind_ = memcpy_async.kind();
   dst_max_ = memcpy_async.dst_max();
   OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async.op_index());
-  op_desc_ = op_desc;
   if (op_desc == nullptr) {
     GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async.op_index());
     return INTERNAL_ERROR;