
TaskInfo no longer holds OpDesc.

tags/v1.2.0
zhangxiaokun · 3 years ago
commit eed5c0e0bc
8 changed files with 190 additions and 137 deletions:

  1. ge/graph/load/new_model_manager/davinci_model.cc  (+2, -0)
  2. ge/graph/load/new_model_manager/davinci_model.h  (+71, -4)
  3. ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.cc  (+18, -21)
  4. ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.h  (+2, -1)
  5. ge/graph/load/new_model_manager/task_info/kernel_task_info.cc  (+73, -59)
  6. ge/graph/load/new_model_manager/task_info/kernel_task_info.h  (+3, -20)
  7. ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.cc  (+20, -30)
  8. ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.h  (+1, -2)

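Across the touched TaskInfo classes the change follows one pattern: resolve the input/output addresses once while the model is loading, keep them in a plain `io_addrs_` vector, and drop the strong `OpDescPtr` so op descriptions do not stay alive for the whole model lifetime. Below is a minimal standalone sketch of that pattern, using simplified stand-in types rather than the real GE `OpDescPtr`/`ModelUtils`/`DavinciModel` interfaces:

```cpp
#include <memory>
#include <vector>

// Simplified stand-in type; the real GE OpDesc is far richer than this sketch.
struct OpDesc {
  std::vector<void *> inputs;
  std::vector<void *> outputs;
};
using OpDescPtr = std::shared_ptr<OpDesc>;

// Before: the task keeps the OpDescPtr for the whole model lifetime and
// re-resolves the addresses on every UpdateArgs() call.
class TaskBefore {
 public:
  void Init(const OpDescPtr &op) { op_desc_ = op; }
  std::vector<void *> UpdateArgs() const {
    std::vector<void *> io(op_desc_->inputs);
    io.insert(io.end(), op_desc_->outputs.begin(), op_desc_->outputs.end());
    return io;
  }
 private:
  OpDescPtr op_desc_;  // held until the task is destroyed
};

// After: Init() copies the resolved I/O addresses into io_addrs_ once, so the
// OpDesc can be released and UpdateArgs() only replays the cached vector.
class TaskAfter {
 public:
  void Init(const OpDescPtr &op) {
    io_addrs_.insert(io_addrs_.end(), op->inputs.begin(), op->inputs.end());
    io_addrs_.insert(io_addrs_.end(), op->outputs.begin(), op->outputs.end());
  }
  const std::vector<void *> &UpdateArgs() const { return io_addrs_; }
 private:
  std::vector<void *> io_addrs_;  // no OpDescPtr member any more
};

int main() {
  auto op = std::make_shared<OpDesc>();
  TaskAfter task;
  task.Init(op);
  op.reset();  // the OpDesc may be freed right after loading
  return task.UpdateArgs().empty() ? 0 : 1;
}
```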
ge/graph/load/new_model_manager/davinci_model.cc  (+2, -0)

@@ -139,6 +139,7 @@ DavinciModel::DavinciModel(int32_t priority, const std::shared_ptr<ModelListener
       is_l1_fusion_enable_(false),
       is_first_execute_(true) {
   op_list_.clear();
+  skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
 }
 
 DavinciModel::~DavinciModel() {
@@ -261,6 +262,7 @@ Status DavinciModel::Assign(const GeModelPtr &ge_model) {
 /// @return: void
 ///
 void DavinciModel::Shrink() {
+  skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
   ge_model_.reset();  // delete object.
 }

ge/graph/load/new_model_manager/davinci_model.h  (+71, -4)

@@ -76,6 +76,25 @@ struct timeInfo {
   int64_t dumpEndTime;
 };
 
+// For super kernel
+static struct SuperKernelTaskInfo {
+  uint32_t last_block_dim;
+  uint32_t last_args_size;
+  uint32_t last_task_id;
+  uint32_t last_stream_id;
+  void *last_stream;
+  void *last_sm_desc;
+  std::vector<void *> kernel_list;
+  std::vector<void *> arg_list;
+  std::vector<uint32_t> dump_flag_list;
+  std::vector<OpDescPtr> op_desc_list;
+  std::vector<uintptr_t> dump_args_list;
+  uint32_t last_dump_flag;
+  int64_t last_group_key;
+  uintptr_t last_dump_args;
+  OpDescPtr last_op;
+};
+
 struct TaskMemInfo {
   int64_t input_size{0};
   int64_t output_size{0};
@@ -98,7 +117,7 @@ enum ExecuteMode {
 
 // comments
 class DavinciModel {
- public:
+ public:
   ///
   /// @ingroup ge
   /// @brief DavinciModel constructor
@@ -204,13 +223,14 @@ class DavinciModel {
   // get total mem size
   size_t TotalMemSize() const { return runtime_param_.mem_size; }
 
-  const std::map<uint32_t, MemInfo> &P2PMemInfos() const {return runtime_param_.memory_infos;}
+  const std::map<uint32_t, MemInfo> &P2PMemInfos() const { return runtime_param_.memory_infos; }
 
   // model name
   string Name() const { return name_; }
 
   // om_name
   string OmName() const { return om_name_; }
+
   // version
   uint32_t Version() const { return version_; }

@@ -255,12 +275,16 @@
     }
     return nullptr;
   }
+
   // get task info for profiling
   const std::vector<TaskDescInfo> &GetTaskDescInfo() const { return task_desc_info_; }
 
   // get updated task info list
   std::vector<TaskInfoPtr> GetTaskList() { return task_list_; }
 
+  // Modified from KernelTaskInfo.
+  SuperKernelTaskInfo &GetSupperKernelTaskInfo() { return skt_info_; }
+
   ///
   /// @ingroup ge
   /// @brief get model input and output format
@@ -421,6 +445,7 @@
   const RuntimeParam &GetRuntimeParam() { return runtime_param_; }
 
   int32_t GetDataInputTid() const { return dataInputTid; }
+
   void SetDataInputTid(int32_t data_input_tid) { dataInputTid = data_input_tid; }
 
   void DisableZeroCopy(const void *addr);
@@ -459,6 +484,7 @@
   }
 
   void SetEndGraphId(uint32_t task_id, uint32_t stream_id);
+
   DavinciModel &operator=(const DavinciModel &model) = delete;
 
   DavinciModel(const DavinciModel &model) = delete;
@@ -466,34 +492,46 @@
   const map<int64_t, std::vector<rtStream_t>> &GetHcclFolowStream() {
     return main_follow_stream_mapping_;
   }
+
   void SaveHcclFollowStream(int64_t main_stream_id, rtStream_t stream);
 
   void InitRuntimeParams();
+
   Status InitVariableMem();
 
   void UpdateMemBase(uint8_t *mem_base) {
     runtime_param_.mem_base = mem_base;
     mem_base_ = mem_base;
   }
+
   void SetTotalArgsSize(uint32_t args_size) { total_args_size_ += args_size; }
+
   uint32_t GetTotalArgsSize() { return total_args_size_; }
+
   void *GetCurrentArgsAddr(uint32_t offset) {
     void *cur_args = static_cast<char *>(args_) + offset;
     return cur_args;
   }
+
   void SetTotalIOAddrs(vector<void *> &io_addrs) {
     total_io_addrs_.insert(total_io_addrs_.end(), io_addrs.begin(), io_addrs.end());
   }
+
   void SetHybridArgsSize(uint32_t args_size) { total_hybrid_args_size_ += args_size; }
+
   uint32_t GetHybridArgsSize() {
     return total_hybrid_args_size_;
   }
+
   void *GetCurrentHybridArgsAddr(uint32_t offset) {
     void *cur_args = static_cast<char *>(hybrid_addrs_) + offset;
     return cur_args;
   }
+
   void SetTotalFixedAddrsSize(string tensor_name, int64_t fix_addr_size);
+
   int64_t GetFixedAddrsSize(string tensor_name);
+
   void *GetCurrentFixedAddr(int64_t offset) const {
     void *cur_addr = static_cast<char *>(fixed_addrs_) + offset;
     return cur_addr;
@@ -505,30 +543,42 @@
     }
     return UINT32_MAX;
   }
+
   void SetKnownNode(bool known_node) { known_node_ = known_node; }
+
   bool IsKnownNode() { return known_node_; }
+
   Status MallocKnownArgs();
+
   Status UpdateKnownNodeArgs(const vector<void *> &inputs, const vector<void *> &outputs);
+
   Status CreateKnownZeroCopyMap(const vector<void *> &inputs, const vector<void *> &outputs);
+
   Status UpdateKnownZeroCopyAddr(vector<void *> &total_io_addrs);
+
   void SetKnownNodeAddrNotChanged(bool base_addr_not_changed) { base_addr_not_changed_ = base_addr_not_changed; }
 
   Status GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info);
+
   Status GetAllAippInputOutputDims(uint32_t index, std::vector<InputOutputDims> &input_dims,
                                    std::vector<InputOutputDims> &output_dims);
+
   void SetModelDescVersion(bool is_new_model_desc) { is_new_model_desc_ = is_new_model_desc; }
+
   // om file name
   void SetOmName(string om_name) { om_name_ = om_name; }
 
   void SetDumpProperties(const DumpProperties &dump_properties) { data_dumper_.SetDumpProperties(dump_properties); }
+
   const DumpProperties &GetDumpProperties() const { return data_dumper_.GetDumpProperties(); }
 
   bool GetOpDescInfo(uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info) const {
     return data_dumper_.GetOpDescInfo(stream_id, task_id, op_desc_info);
   }
+
   Status InitInputOutputForDynamic(const ComputeGraphPtr &compute_graph);
 
- private:
+ private:
   // memory address of weights
   uint8_t *weights_mem_base_;
   uint8_t *var_mem_base_;
@@ -592,6 +642,7 @@
   Status SyncVarData();
 
   Status InitWeightMem(void *dev_ptr, void *weight_ptr, size_t weight_size);
+
   Status InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size);
 
   void CreateInputDimsInfo(const OpDescPtr &op_desc, Format format, InputOutputDescInfo &input);
@@ -610,7 +661,7 @@
 
   uint8_t *MallocWeightsMem(size_t weights_size);
 
-  uint8_t* MallocP2PMem(size_t p2p_data_size);
+  uint8_t *MallocP2PMem(size_t p2p_data_size);
 
   void FreeFeatureMapMem();
 
@@ -702,6 +753,7 @@
   Status InitTbeHandle(const OpDescPtr &op_desc);
 
   void StoreTbeHandle(const std::string &handle_key);
+
   void CleanTbeHandle();
 
   ///
@@ -740,6 +792,7 @@
   /// @return: 0 for success / others for fail
   ///
   Status BindOutputQueue();
+
   Status CpuModelPrepareOutput(uintptr_t addr, uint32_t size);
 
   ///
@@ -777,7 +830,9 @@
   Status CpuWaitEndGraph();
 
   Status BindEnqueue();
+
   Status CpuModelEnqueue(uint32_t queue_id, uintptr_t out_mbuf);
+
   ///
   /// @ingroup ge
   /// @brief definiteness queue schedule, repeat run model.
@@ -786,6 +841,7 @@
   Status CpuModelRepeat();
 
   Status InitEntryTask();
+
   Status AddHeadStream();
 
   ///
@@ -813,6 +869,7 @@
   void SetDataDumperArgs(const ComputeGraphPtr &compute_graph);
 
   Status InitModelProfile();
+
   Status SinkModelProfile();
 
   Status SinkTimeProfile(const InputData &current_data);
@@ -821,14 +878,21 @@
                                 std::vector<ge::OutputTensorInfo> &outputs);
 
   void ParseAIPPInfo(std::string in_out_info, InputOutputDims &dims_info);
+
   void SetLabelForDynamic(const NodePtr &node);
 
   void ParseDynamicOutShape(const std::vector<std::string> &str_info, std::vector<vector<int64_t>> &vec_info);
+
   bool IsGetNextSinkDynamic(const OpDescPtr &op_desc);
+
   void GetAllGearsInfo(const NodePtr &node);
+
   Status GetGetDynamicDimsNodeInfo(const NodePtr &node);
+
   Status GetGearAndRealOutSizeInfo(size_t input_count, const NodePtr &node);
+
   Status GetRealOutputSizeOfMerge(size_t input_index, const NodePtr &merge_node);
+
   Status GetGearAndRealOutShapeInfo(size_t input_count, const OpDescPtr &op_desc);
 
   bool is_weight_mem_has_inited_;
@@ -996,6 +1060,9 @@
 
   std::multimap<uint32_t, uint32_t> op_id_map_;
   std::vector<ProfileInfo> profile_list_;
+
+  // For super kernel.
+  SuperKernelTaskInfo skt_info_;
 };
 }  // namespace ge
 #endif  // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_DAVINCI_MODEL_H_

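The header change above also moves the super-kernel bookkeeping out of KernelTaskInfo's static `skt_info_` into a per-`DavinciModel` member exposed through `GetSupperKernelTaskInfo()` (the accessor is spelled that way in the diff) and reset in `Shrink()`. A rough sketch of why per-model state matters, with hypothetical, heavily simplified types:

```cpp
#include <vector>

// Hypothetical, heavily simplified shapes; only the ownership change is shown.
struct SuperKernelTaskInfo {
  std::vector<void *> kernel_list;
  std::vector<void *> arg_list;
};

class Model {
 public:
  // Named GetSupperKernelTaskInfo() in the actual diff.
  SuperKernelTaskInfo &GetSuperKernelTaskInfo() { return skt_info_; }
 private:
  SuperKernelTaskInfo skt_info_;  // one instance per model, reset in Shrink()
};

class KernelTask {
 public:
  explicit KernelTask(Model &model) : model_(model) {}
  void Save(void *stub, void *args) {
    // Was: a class-wide static skt_info_ shared by every KernelTaskInfo.
    SuperKernelTaskInfo &skt = model_.GetSuperKernelTaskInfo();
    skt.kernel_list.push_back(stub);
    skt.arg_list.push_back(args);
  }
 private:
  Model &model_;
};

int main() {
  Model a;
  Model b;
  KernelTask(a).Save(nullptr, nullptr);
  // Two models no longer share super-kernel state.
  return b.GetSuperKernelTaskInfo().kernel_list.empty() ? 0 : 1;
}
```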
ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.cc  (+18, -21)

@@ -30,11 +30,7 @@
 namespace ge {
 Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
   GELOGI("KernelExTaskInfo Init Start.");
-  if (davinci_model == nullptr) {
-    GELOGE(PARAM_INVALID, "davinci_model is null!");
-    return PARAM_INVALID;
-  }
-
+  GE_CHECK_NOTNULL(davinci_model);
   davinci_model_ = davinci_model;
   Status ret = SetStream(task_def.stream_id(), davinci_model_->GetStreamList());
   if (ret != SUCCESS) {
@@ -51,7 +47,6 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
     GELOGE(INTERNAL_ERROR, "Init aicpu task info error, index is out of range!");
     return INTERNAL_ERROR;
   }
-  op_desc_ = op_desc;
 
   // 2. Reconstruct kernelExDef.args to STR_FWK_OP_KERNEL
   STR_FWK_OP_KERNEL fwk_op_kernel = {0};
@@ -79,8 +74,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
       return RT_ERROR_TO_GE_STATUS(rt_ret);)
   }
 
-  GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc_->GetName().c_str(),
-         op_desc_->GetType().c_str(), ext_info.size(), ext_info_addr_);
+  GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc->GetName().c_str(),
+         op_desc->GetType().c_str(), ext_info.size(), ext_info_addr_);
 
   // 2.1 get loop cond variable for tensor array write
   uint64_t step_id_addr = 0;
@@ -236,36 +231,38 @@ Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciMod
   return SUCCESS;
 }
 
-Status KernelExTaskInfo::UpdateArgs() {
-  GELOGI("KernelExTaskInfo::UpdateArgs in.");
+Status KernelExTaskInfo::SetIoAddr(const OpDescPtr &op_desc) {
   const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
-  vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
-  vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
-  vector<void *> io_addrs;
-  if (!op_desc_->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
-    io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
-    io_addrs.insert(io_addrs.end(), output_data_addrs.begin(), output_data_addrs.end());
+  vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
+  vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
+  if (!op_desc->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
+    io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
+    io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
   } else {
     string peer_input_name;
-    if (AttrUtils::GetStr(op_desc_, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
+    if (AttrUtils::GetStr(op_desc, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
       uint32_t output_index = davinci_model_->GetFixedAddrOutputIndex(peer_input_name);
       if (output_index > output_data_addrs.size()) {
        GELOGE(FAILED, "The output data addr size[%zu] and output index[%u] are inconsistent.",
               output_data_addrs.size(), output_index);
        return FAILED;
      }
-      io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
+      io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
      for (size_t i = 0; i < output_data_addrs.size(); ++i) {
        if (i == output_index) {
          void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
-          io_addrs.emplace_back(fixed_addr);
+          io_addrs_.emplace_back(fixed_addr);
          continue;
        }
-        io_addrs.emplace_back(output_data_addrs[i]);
+        io_addrs_.emplace_back(output_data_addrs[i]);
      }
    }
  }
-  davinci_model_->SetTotalIOAddrs(io_addrs);
+  return SUCCESS;
+}
+Status KernelExTaskInfo::UpdateArgs() {
+  GELOGI("KernelExTaskInfo::UpdateArgs in.");
+  davinci_model_->SetTotalIOAddrs(io_addrs_);
   GELOGI("KernelExTaskInfo::UpdateArgs success.");
   return SUCCESS;
 }

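The new `SetIoAddr()` builds `io_addrs_` once at init time, including the `ATTR_DYNAMIC_SHAPE_FIXED_ADDR` substitution, so `UpdateArgs()` only forwards the cached vector. A small self-contained sketch of that substitution step (hypothetical helper with simplified parameters; the real code reads the attribute, index, and fixed address from the model):

```cpp
#include <cstddef>
#include <vector>

// Hypothetical helper mirroring the substitution SetIoAddr() performs: when the
// fixed-address attribute is present, one output slot is replaced by a fixed
// address resolved from the model; otherwise inputs + outputs are used as-is.
std::vector<void *> BuildIoAddrs(const std::vector<void *> &inputs,
                                 const std::vector<void *> &outputs,
                                 bool has_fixed_addr_attr,
                                 size_t fixed_output_index,
                                 void *fixed_addr) {
  std::vector<void *> io(inputs);
  if (!has_fixed_addr_attr) {
    io.insert(io.end(), outputs.begin(), outputs.end());
    return io;
  }
  for (size_t i = 0; i < outputs.size(); ++i) {
    io.push_back(i == fixed_output_index ? fixed_addr : outputs[i]);
  }
  return io;
}

int main() {
  int in0 = 0;
  int out0 = 0;
  int fixed = 0;
  std::vector<void *> inputs{&in0};
  std::vector<void *> outputs{&out0};
  std::vector<void *> io = BuildIoAddrs(inputs, outputs, true, 0, &fixed);
  return io[1] == static_cast<void *>(&fixed) ? 0 : 1;  // output 0 replaced
}
```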

ge/graph/load/new_model_manager/task_info/kernel_ex_task_info.h  (+2, -1)

@@ -59,6 +59,7 @@ class KernelExTaskInfo : public TaskInfo {
   };
  private:
   Status CopyTaskInfo(const domi::KernelExDef &kernel_def, const RuntimeParam &rts_param, const OpDescPtr &op_desc);
+  Status SetIoAddr(const OpDescPtr &op_desc);
 
   uint32_t task_id_;
   uint32_t stream_id_;
@@ -69,7 +70,7 @@ class KernelExTaskInfo : public TaskInfo {
   void *input_output_addr_;
   void *ext_info_addr_;
   void *dump_args_;
-  OpDescPtr op_desc_ = nullptr;
+  vector<void *> io_addrs_;
   uint32_t args_offset_ = 0;
   int64_t fixed_addr_offset_ = 0;
 };

ge/graph/load/new_model_manager/task_info/kernel_task_info.cc  (+73, -59)

@@ -53,9 +53,6 @@ const int kArgsAttrHandle = 4;
 }  // namespace
 
 namespace ge {
-KernelTaskInfo::SuperKernelTaskInfo KernelTaskInfo::skt_info_ = {
-    0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, kInvalidGroupKey, 0, nullptr};
-
 Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
   GE_CHECK_NOTNULL(davinci_model);
   davinci_model_ = davinci_model;
@@ -137,6 +134,7 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
     ret = InitCceTask(kernel_def);
   }
 
+  SetIoAddrs(op_desc_);
   GELOGD("KernelTaskInfo init finish, result=%u.", ret);
   return ret;
 }
@@ -148,9 +146,10 @@ Status KernelTaskInfo::SaveSKTDumpInfo() {
     return SUCCESS;
   }
   // all op in super kernel share one taskid and streamid
-  for (size_t i = 0; i < skt_info_.op_desc_list.size(); i++) {
-    davinci_model_->SaveDumpTask(skt_info_.last_task_id, skt_info_.last_stream_id, skt_info_.op_desc_list[i],
-                                 skt_info_.dump_args_list[i]);
+  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  for (size_t i = 0; i < skt_info.op_desc_list.size(); i++) {
+    davinci_model_->SaveDumpTask(skt_info.last_task_id, skt_info.last_stream_id, skt_info.op_desc_list[i],
+                                 skt_info.dump_args_list[i]);
   }
   return SUCCESS;
 }
@@ -164,9 +163,10 @@ void KernelTaskInfo::UpdateSKTTaskId() {
     GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
     return;
   }
-  skt_info_.last_task_id = task_id;
-  skt_info_.last_stream_id = stream_id;
-  skt_id_ = skt_info_.last_task_id;
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  skt_info.last_task_id = task_id;
+  skt_info.last_stream_id = stream_id;
+  skt_id_ = skt_info.last_task_id;
 
   GELOGI("UpdateTaskId:UpdateSKTTaskId [%u],stream id [%u]", task_id, stream_id);
 }
@@ -191,23 +191,25 @@ Status KernelTaskInfo::SKTFinalize() {
   UpdateSKTTaskId();
   GE_CHK_STATUS_RET(SaveSKTDumpInfo(), "skt save dump info failed");
   GELOGI("SuperKernel Distribute [skt_id:%u]", skt_id_);
-  skt_info_.kernel_list.clear();
-  skt_info_.arg_list.clear();
-  skt_info_.dump_flag_list.clear();
-  skt_info_.op_desc_list.clear();
-  skt_info_.dump_args_list.clear();
-  skt_info_.last_stream = nullptr;
-  skt_info_.last_block_dim = 0;
-  skt_info_.last_sm_desc = sm_desc_;
-  skt_info_.last_group_key = kInvalidGroupKey;
-  skt_info_.last_dump_flag = RT_KERNEL_DEFAULT;
-  skt_info_.last_dump_args = 0;
-  skt_info_.last_op = nullptr;
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  skt_info.kernel_list.clear();
+  skt_info.arg_list.clear();
+  skt_info.dump_flag_list.clear();
+  skt_info.op_desc_list.clear();
+  skt_info.dump_args_list.clear();
+  skt_info.last_stream = nullptr;
+  skt_info.last_block_dim = 0;
+  skt_info.last_sm_desc = sm_desc_;
+  skt_info.last_group_key = kInvalidGroupKey;
+  skt_info.last_dump_flag = RT_KERNEL_DEFAULT;
+  skt_info.last_dump_args = 0;
+  skt_info.last_op = nullptr;
   return SUCCESS;
 }
 
 uint32_t KernelTaskInfo::GetDumpFlag() {
-  for (auto flag : skt_info_.dump_flag_list) {
+  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  for (auto flag : skt_info.dump_flag_list) {
     if (flag == RT_KERNEL_DUMPFLAG) {
       return RT_KERNEL_DUMPFLAG;
     }
@@ -216,19 +218,20 @@ uint32_t KernelTaskInfo::GetDumpFlag() {
 }
 
 Status KernelTaskInfo::SuperKernelLaunch() {
-  if (skt_info_.kernel_list.empty()) {
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  if (skt_info.kernel_list.empty()) {
     GELOGI("SuperKernelLaunch: Skt_kernel_list has no task, just return");
     return SUCCESS;
   }
   rtError_t rt_ret;
-  auto &skt_kernel_list = skt_info_.kernel_list;
-  auto &skt_arg_list = skt_info_.arg_list;
+  auto &skt_kernel_list = skt_info.kernel_list;
+  auto &skt_arg_list = skt_info.arg_list;
   GELOGI("SuperKernelLaunch: Skt_kernel_list size[%zu] skt_arg_list[%zu]", skt_kernel_list.size(), skt_arg_list.size());
   if (skt_kernel_list.size() == kSKTSingleSize && skt_arg_list.size() == kSKTSingleSize) {
-    rt_ret = rtKernelLaunchWithFlag(skt_info_.kernel_list[0], static_cast<uint32_t>(skt_info_.last_block_dim),
-                                    skt_info_.arg_list[0], skt_info_.last_args_size,
-                                    static_cast<rtSmDesc_t *>(skt_info_.last_sm_desc), skt_info_.last_stream,
-                                    skt_info_.last_dump_flag);
+    rt_ret = rtKernelLaunchWithFlag(skt_info.kernel_list[0], static_cast<uint32_t>(skt_info.last_block_dim),
+                                    skt_info.arg_list[0], skt_info.last_args_size,
+                                    static_cast<rtSmDesc_t *>(skt_info.last_sm_desc), skt_info.last_stream,
+                                    skt_info.last_dump_flag);
     if (rt_ret != RT_ERROR_NONE) {
       GELOGE(RT_FAILED, "SuperKernelLaunch: Call rt api failed, ret: 0x%X", rt_ret);
       return RT_ERROR_TO_GE_STATUS(rt_ret);
@@ -247,14 +250,14 @@ Status KernelTaskInfo::SuperKernelLaunch() {
   }
   // Call the fuse API
   std::unique_ptr<skt::SuperKernel> superKernel = nullptr;
-  ge_ret = factory->FuseKernels(skt_kernel_list, skt_arg_list, skt_info_.last_block_dim, superKernel);
+  ge_ret = factory->FuseKernels(skt_kernel_list, skt_arg_list, skt_info.last_block_dim, superKernel);
   if (ge_ret != SUCCESS) {
     GELOGE(ge_ret, "SuperKernelLaunch: fuse call failed");
     return ge_ret;
   }
   // Launch a super kernel
   skt_dump_flag_ = GetDumpFlag();
-  ge_ret = superKernel->Launch(skt_info_.last_stream, skt_dump_flag_);
+  ge_ret = superKernel->Launch(skt_info.last_stream, skt_dump_flag_);
   if (ge_ret != SUCCESS) {
     GELOGE(ge_ret, "SuperKernelLaunch: launch failed");
     return ge_ret;
@@ -269,23 +272,26 @@ Status KernelTaskInfo::SuperKernelLaunch() {
 }
 
 Status KernelTaskInfo::SaveSuperKernelInfo() {
-  skt_info_.kernel_list.push_back(stub_func_);
-  skt_info_.arg_list.push_back(args_);
-  skt_info_.last_stream = stream_;
-  skt_info_.last_block_dim = block_dim_;
-  skt_info_.last_args_size = args_size_;
-  skt_info_.last_sm_desc = sm_desc_;
-  skt_info_.last_dump_flag = dump_flag_;
-  skt_info_.dump_flag_list.push_back(dump_flag_);
-  skt_info_.op_desc_list.push_back(op_desc_);
-  skt_info_.dump_args_list.push_back(reinterpret_cast<uintptr_t>(skt_dump_args_));
-  skt_info_.last_group_key = group_key_;
-  skt_info_.last_dump_args = reinterpret_cast<uintptr_t>(skt_dump_args_);
-  skt_info_.last_op = op_desc_;
+  SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  skt_info.kernel_list.push_back(stub_func_);
+  skt_info.arg_list.push_back(args_);
+  skt_info.last_stream = stream_;
+  skt_info.last_block_dim = block_dim_;
+  skt_info.last_args_size = args_size_;
+  skt_info.last_sm_desc = sm_desc_;
+  skt_info.last_dump_flag = dump_flag_;
+  skt_info.dump_flag_list.push_back(dump_flag_);
+  skt_info.op_desc_list.push_back(op_desc_);
+  skt_info.dump_args_list.push_back(reinterpret_cast<uintptr_t>(skt_dump_args_));
+  skt_info.last_group_key = group_key_;
+  skt_info.last_dump_args = reinterpret_cast<uintptr_t>(skt_dump_args_);
+  skt_info.last_op = op_desc_;
   // last node in a stream, just launch
   if (IsMarkedLastNode()) {
     return SuperKernelLaunch();
   }
+
+  GELOGI("Save Current task [block_dim:%u, size:%zu].", block_dim_, skt_info.kernel_list.size());
   return SUCCESS;
 }

@@ -322,8 +328,9 @@ bool KernelTaskInfo::IsMarkedFirstNode() {
 // then may be saved to skt task list; else
 // call skt launch those saved tasks before
 bool KernelTaskInfo::FirstCallSKTLaunchCheck() {
-  return ((block_dim_ != skt_info_.last_block_dim) || (stream_ != skt_info_.last_stream) ||
-          (has_group_key_ && (group_key_ != skt_info_.last_group_key)));
+  const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
+  return ((block_dim_ != skt_info.last_block_dim) || (stream_ != skt_info.last_stream) ||
+          (has_group_key_ && (group_key_ != skt_info.last_group_key)));
 }
 
 // current task has group_id or has n ATTR_N_BATCH_SPLIT then save it to skt task list; else
@@ -362,7 +369,6 @@ Status KernelTaskInfo::SuperKernelDistribute() {
       GELOGE(ret, "Call SuperKernelLaunch failed!");
       return ret;
     }
-    GELOGI("Save Current task [block_dim:%u, size:%zu].", block_dim_, skt_info_.kernel_list.size());
   }
   return SUCCESS;
 }
@@ -391,10 +397,11 @@ Status KernelTaskInfo::Distribute() {
     call_save_dump_ = true;
   } else {
     /* default: not skt launch */
+    const SuperKernelTaskInfo &skt_info = davinci_model_->GetSupperKernelTaskInfo();
     GELOGD(
       "KernelTaskInfo Distribute Start, sktenable:%d taskid:%u sktid:%u last_sktid:%u stubfunc_name:%s "
       "stubfunc:%p blockdim:%u stream:%p",
-      call_skt, task_id_, skt_id_, skt_info_.last_task_id, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
+      call_skt, task_id_, skt_id_, skt_info.last_task_id, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
     // l1 fusion enable and env flag open (kCloseSkt for skt debug)
     bool open_dump = false;
     auto all_dump_model = davinci_model_->GetDumpProperties().GetAllDumpModel();
@@ -422,23 +429,30 @@ Status KernelTaskInfo::Distribute() {
     "KernelTaskInfo Distribute Success. sktenable:%d taskid:%d sktid:%d stubfunc_name:%s stubfunc:%p "
     "blockdim:%d stream:%p",
     call_skt, task_id_, skt_id_, stub_func_name_.c_str(), stub_func_, block_dim_, stream_);
+  op_desc_.reset();  // Not hold OpDesc after distribute.
   return SUCCESS;
 }
 
+void KernelTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
+  const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
+  vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
+  vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
+
+  io_addrs_.insert(io_addrs_.end(), input_data_addrs.begin(), input_data_addrs.end());
+  io_addrs_.insert(io_addrs_.end(), output_data_addrs.begin(), output_data_addrs.end());
+  if (kernel_type_ == ccKernelType::TE) {
+    vector<void *> workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
+    io_addrs_.insert(io_addrs_.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
+  }
+}
+
 Status KernelTaskInfo::UpdateArgs() {
   GELOGI("KernelTaskInfo::UpdateArgs in.");
-  const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
-  vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc_);
-  vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc_);
 
-  vector<void *> io_addrs;
-  io_addrs.insert(io_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
-  io_addrs.insert(io_addrs.end(), output_data_addrs.begin(), output_data_addrs.end());
   if (kernel_type_ == ccKernelType::TE) {
-    vector<void *> workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc_);
-    io_addrs.insert(io_addrs.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
-    davinci_model_->SetTotalIOAddrs(io_addrs);
+    davinci_model_->SetTotalIOAddrs(io_addrs_);
   } else if (kernel_type_ == ccKernelType::AI_CPU || kernel_type_ == ccKernelType::CUST_AI_CPU) {
+    vector<void *> io_addrs = io_addrs_;
     davinci_model_->UpdateKnownZeroCopyAddr(io_addrs);
     uintptr_t io_addr = reinterpret_cast<uintptr_t>(args_addr.get()) + sizeof(aicpu::AicpuParamHead);
     auto addrs_size = sizeof(uint64_t) * io_addrs.size();
@@ -854,14 +868,14 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
   GELOGI("Do InitAicpuTask");
   so_name_ = kernel_def.so_name();
   kernel_name_ = kernel_def.kernel_name();
-  GELOGI("node[%s] test so name %s, kernel name %s", op_desc_->GetName().c_str(), so_name_.c_str(),
-         kernel_name_.c_str());
 
   OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index);
   if (op_desc == nullptr) {
     GELOGE(INTERNAL_ERROR, "index is out of range, index: %u", op_index);
     return INTERNAL_ERROR;
   }
+  GELOGI("node[%s] test so name %s, kernel name %s", op_desc->GetName().c_str(), so_name_.c_str(),
+         kernel_name_.c_str());
 
   if (kernel_type_ == ccKernelType::CUST_AI_CPU) {
     bool loaded = false;

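In KernelTaskInfo the same idea shows up as `SetIoAddrs(op_desc_)` during `Init()` plus `op_desc_.reset()` at the end of `Distribute()`: the task drops its own strong reference once distribution is done. A tiny illustration of the reference-count effect, using a bare `shared_ptr` stand-in rather than the real task class:

```cpp
#include <cassert>
#include <memory>
#include <vector>

struct OpDesc {};
using OpDescPtr = std::shared_ptr<OpDesc>;

// Toy task: Init() caches what it needs, Distribute() releases the OpDescPtr.
class KernelTask {
 public:
  void Init(const OpDescPtr &op) {
    op_desc_ = op;
    io_addrs_.clear();  // stands in for SetIoAddrs(op) in the real code
  }
  void Distribute() {
    // ... launch work that may still look at op_desc_ ...
    op_desc_.reset();  // "Not hold OpDesc after distribute."
  }
 private:
  OpDescPtr op_desc_;
  std::vector<void *> io_addrs_;
};

int main() {
  auto op = std::make_shared<OpDesc>();
  KernelTask task;
  task.Init(op);
  assert(op.use_count() == 2);  // caller + task
  task.Distribute();
  assert(op.use_count() == 1);  // the task dropped its reference
  return 0;
}
```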

ge/graph/load/new_model_manager/task_info/kernel_task_info.h  (+3, -20)

@@ -128,6 +128,7 @@ class KernelTaskInfo : public TaskInfo {
 
   Status SuperKernelDistribute();
   bool IsL1FusionOp(const OpDescPtr &op_desc);
+  void SetIoAddrs(const OpDescPtr &op_desc);
 
   // For super kernel
   Status SaveSKTDumpInfo();
@@ -156,7 +157,8 @@ class KernelTaskInfo : public TaskInfo {
   ccKernelType kernel_type_;
   uint32_t dump_flag_;
   void *dump_args_;
-  OpDescPtr op_desc_;
+  OpDescPtr op_desc_;  // Clear after distribute.
+  vector<void *> io_addrs_;
   DavinciModel *davinci_model_;
   uint32_t args_offset_ = 0;
   uint32_t hybrid_args_offset_ = 0;
@@ -186,25 +188,6 @@ class KernelTaskInfo : public TaskInfo {
     void *output_addrs = nullptr;
     void *attr_handle = nullptr;
   } custom_info_;
-
-  // For super kernel
-  static struct SuperKernelTaskInfo {
-    uint32_t last_block_dim;
-    uint32_t last_args_size;
-    uint32_t last_task_id;
-    uint32_t last_stream_id;
-    void *last_stream;
-    void *last_sm_desc;
-    std::vector<void *> kernel_list;
-    std::vector<void *> arg_list;
-    std::vector<uint32_t> dump_flag_list;
-    std::vector<OpDescPtr> op_desc_list;
-    std::vector<uintptr_t> dump_args_list;
-    uint32_t last_dump_flag;
-    int64_t last_group_key;
-    uintptr_t last_dump_args;
-    OpDescPtr last_op;
-  } skt_info_;
 };
 }  // namespace ge
 #endif  // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_KERNEL_TASK_INFO_H_

ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.cc  (+20, -30)

@@ -30,14 +30,14 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
     return ret;
   }
 
-  memcpy_async_ = task_def.memcpy_async();
-  count_ = memcpy_async_.count();
-  kind_ = memcpy_async_.kind();
-  dst_max_ = memcpy_async_.dst_max();
-  OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async_.op_index());
+  const domi::MemcpyAsyncDef &memcpy_async = task_def.memcpy_async();
+  count_ = memcpy_async.count();
+  kind_ = memcpy_async.kind();
+  dst_max_ = memcpy_async.dst_max();
+  OpDescPtr op_desc = davinci_model_->GetOpByIndex(memcpy_async.op_index());
   op_desc_ = op_desc;
   if (op_desc == nullptr) {
-    GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async_.op_index());
+    GELOGE(INTERNAL_ERROR, "Task op index:%u out of range", memcpy_async.op_index());
     return INTERNAL_ERROR;
   }
 
@@ -52,7 +52,7 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
   }
 
   const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam();
-  ret = ModelUtils::GetRtAddress(rts_param, memcpy_async_.src(), src_);
+  ret = ModelUtils::GetRtAddress(rts_param, memcpy_async.src(), src_);
   if (ret != SUCCESS) {
     return ret;
   }
@@ -61,23 +61,31 @@ Status MemcpyAsyncTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da
   vector<int64_t> memory_type_list;
   (void)AttrUtils::GetListInt(op_desc, ATTR_NAME_OUTPUT_MEM_TYPE_LIST, memory_type_list);
   if (!memory_type_list.empty() && memory_type_list[0] == RT_MEMORY_TS_4G) {  // TS Feature, Just one.
-    uint64_t mem_offset = memcpy_async_.dst() - rts_param.logic_mem_base;
-    dst_ = static_cast<uint8_t *>(rts_param.ts_mem_mall->Acquire(mem_offset, memcpy_async_.dst_max()));
+    uint64_t mem_offset = memcpy_async.dst() - rts_param.logic_mem_base;
+    dst_ = static_cast<uint8_t *>(rts_param.ts_mem_mall->Acquire(mem_offset, memcpy_async.dst_max()));
     if (dst_ == nullptr) {
       return FAILED;
     }
   } else {
-    ret = ModelUtils::GetRtAddress(rts_param, memcpy_async_.dst(), dst_);
+    ret = ModelUtils::GetRtAddress(rts_param, memcpy_async.dst(), dst_);
     if (ret != SUCCESS) {
       return ret;
     }
   }
 
   GELOGI("MemcpyAsyncTaskInfo Init Success, logic[0x%lx, 0x%lx], src:%p, dst:%p, max:%lu, count:%lu",
-         memcpy_async_.src(), memcpy_async_.dst(), src_, dst_, dst_max_, count_);
+         memcpy_async.src(), memcpy_async.dst(), src_, dst_, dst_max_, count_);
 
   davinci_model_->DisableZeroCopy(src_);
   davinci_model_->DisableZeroCopy(dst_);
+
+  io_addrs_.emplace_back(reinterpret_cast<void *>(src_));
+  if (op_desc->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
+    void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
+    io_addrs_.emplace_back(fixed_addr);
+  } else {
+    io_addrs_.emplace_back(reinterpret_cast<void *>(dst_));
+  }
   return SUCCESS;
 }


@@ -118,25 +126,7 @@ Status MemcpyAsyncTaskInfo::CalculateArgs(const domi::TaskDef &task_def, Davinci
 Status MemcpyAsyncTaskInfo::UpdateArgs() {
   GELOGI("MemcpyAsyncTaskInfo::UpdateArgs in.");
   GE_CHECK_NOTNULL(davinci_model_);
-  Status ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async_.src(), src_);
-  if (ret != SUCCESS) {
-    return ret;
-  }
-
-  ret = ModelUtils::GetRtAddress(davinci_model_->GetRuntimeParam(), memcpy_async_.dst(), dst_);
-  if (ret != SUCCESS) {
-    return ret;
-  }
-
-  vector<void *> io_addrs;
-  io_addrs.emplace_back(reinterpret_cast<void *>(src_));
-  if (op_desc_->HasAttr(ATTR_DYNAMIC_SHAPE_FIXED_ADDR)) {
-    void *fixed_addr = davinci_model_->GetCurrentFixedAddr(fixed_addr_offset_);
-    io_addrs.emplace_back(fixed_addr);
-  } else {
-    io_addrs.emplace_back(reinterpret_cast<void *>(dst_));
-  }
-  davinci_model_->SetTotalIOAddrs(io_addrs);
+  davinci_model_->SetTotalIOAddrs(io_addrs_);
 
   GELOGI("MemcpyAsyncTaskInfo::UpdateArgs success.");
   return SUCCESS;

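For the memcpy task, `Init()` now resolves `src_`/`dst_` (or the fixed address) once and records them in `io_addrs_`, so `UpdateArgs()` no longer re-runs `GetRtAddress` and the `domi::MemcpyAsyncDef` copy can be dropped from the class. A simplified sketch of that shape (assumed toy interface, not the real MemcpyAsyncTaskInfo):

```cpp
#include <cstdint>
#include <vector>

// Assumed toy interface: Init() resolves the source/destination once and
// caches them; UpdateArgs() simply republishes the cached addresses.
class MemcpyAsyncTask {
 public:
  void Init(uint8_t *src, uint8_t *dst) {
    src_ = src;
    dst_ = dst;
    io_addrs_ = {src_, dst_};  // no protobuf copy kept around
  }
  const std::vector<void *> &UpdateArgs() const { return io_addrs_; }

 private:
  uint8_t *src_ = nullptr;
  uint8_t *dst_ = nullptr;
  std::vector<void *> io_addrs_;
};

int main() {
  uint8_t src = 0;
  uint8_t dst = 0;
  MemcpyAsyncTask task;
  task.Init(&src, &dst);
  return task.UpdateArgs().size() == 2 ? 0 : 1;
}
```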

ge/graph/load/new_model_manager/task_info/memcpy_async_task_info.h  (+1, -2)

@@ -44,11 +44,10 @@ class MemcpyAsyncTaskInfo : public TaskInfo {
   uint8_t *src_;
   uint64_t count_;
   uint32_t kind_;
-  OpDescPtr op_desc_;
+  vector<void *> io_addrs_;
   int64_t fixed_addr_offset_;
   DavinciModel *davinci_model_ = nullptr;
   uint32_t args_offset_ = 0;
-  domi::MemcpyAsyncDef memcpy_async_;
 };
 }  // namespace ge
 #endif  // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_MEMCPY_ASYNC_TASK_INFO_H_
