Browse Source

!1071 update commit id

From: @shenwei41
Reviewed-by: @liujunzhu,@lilongfei15
Signed-off-by: @lilongfei15
tags/v1.2.0
mindspore-ci-bot Gitee 3 years ago
parent
commit
ea0e2eadad
27 changed files with 709 additions and 230 deletions
  1. +4
    -2
      ge/CMakeLists.txt
  2. +1
    -0
      ge/ge_inference.mk
  3. +1
    -0
      ge/ge_runner.mk
  4. +6
    -1
      ge/generator/ge_generator.cc
  5. +21
    -0
      ge/graph/build/memory/graph_mem_assigner.cc
  6. +4
    -0
      ge/graph/build/memory/graph_mem_assigner.h
  7. +5
    -0
      ge/graph/build/memory/memory_assigner.cc
  8. +1
    -4
      ge/graph/build/memory/var_mem_assign_util.cc
  9. +59
    -66
      ge/graph/load/model_manager/davinci_model.cc
  10. +3
    -3
      ge/graph/load/model_manager/davinci_model.h
  11. +36
    -10
      ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc
  12. +1
    -0
      ge/graph/load/model_manager/task_info/kernel_ex_task_info.h
  13. +24
    -29
      ge/graph/load/model_manager/task_info/kernel_task_info.cc
  14. +4
    -3
      ge/graph/manager/graph_manager.cc
  15. +411
    -0
      ge/graph/passes/hccl_continuous_memcpy_pass.cc
  16. +59
    -0
      ge/graph/passes/hccl_continuous_memcpy_pass.h
  17. +4
    -91
      ge/graph/passes/hccl_memcpy_pass.cc
  18. +0
    -4
      ge/graph/passes/hccl_memcpy_pass.h
  19. +3
    -0
      ge/graph/preprocess/graph_preprocess.cc
  20. +26
    -0
      ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc
  21. +4
    -0
      ge/hybrid/node_executor/aicpu/aicpu_ext_info.h
  22. +1
    -0
      ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
  23. +4
    -2
      ge/single_op/task/op_task.cc
  24. +1
    -0
      tests/ut/ge/CMakeLists.txt
  25. +8
    -0
      tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc
  26. +3
    -0
      tests/ut/ge/graph/load/kernel_task_info_unittest.cc
  27. +15
    -15
      third_party/fwkacllib/inc/toolchain/slog.h

+ 4
- 2
ge/CMakeLists.txt View File

@@ -258,6 +258,7 @@ set(TRAIN_SRC_LIST
"graph/passes/get_original_format_pass.cc"
"graph/passes/guarantee_const_pass.cc"
"graph/passes/hccl_memcpy_pass.cc"
"graph/passes/hccl_continuous_memcpy_pass.cc"
"graph/passes/identity_pass.cc"
"graph/passes/ref_identity_delete_op_pass.cc"
"graph/passes/infershape_pass.cc"
@@ -595,6 +596,7 @@ set(INFER_SRC_LIST
"graph/passes/cast_remove_pass.cc"
"graph/passes/transpose_transdata_pass.cc"
"graph/passes/hccl_memcpy_pass.cc"
"graph/passes/hccl_continuous_memcpy_pass.cc"
"graph/passes/flow_ctrl_pass.cc"
"graph/passes/global_step_insert_pass.cc"
"graph/passes/link_gen_mask_nodes_pass.cc"
@@ -707,7 +709,7 @@ target_compile_options(ge_runner PRIVATE
-O2
-fno-common
$<$<STREQUAL:${CMAKE_CXX_COMPILER_VERSION},7.3.0>:-Werror=unused-variable>
$<$<STREQUAL:${CMAKE_CXX_COMPILER_VERSION},7.3.0>:-Werror=unused-const-variable>
$<$<STREQUAL:${CMAKE_CXX_COMPILER_VERSION},7.3.0>:-Werror=unused-const-variable -Werror=format>
)

target_include_directories(ge_runner SYSTEM PRIVATE
@@ -776,7 +778,7 @@ target_compile_options(ge_compiler PRIVATE
-O2
-fno-common
$<$<STREQUAL:${CMAKE_CXX_COMPILER_VERSION},7.3.0>:-Werror=unused-variable>
$<$<STREQUAL:${CMAKE_CXX_COMPILER_VERSION},7.3.0>:-Werror=unused-const-variable>
$<$<STREQUAL:${CMAKE_CXX_COMPILER_VERSION},7.3.0>:-Werror=unused-const-variable -Werror=format>
)

target_include_directories(ge_compiler SYSTEM PRIVATE


+ 1
- 0
ge/ge_inference.mk View File

@@ -212,6 +212,7 @@ OMG_HOST_SRC_FILES := \
graph/passes/cast_remove_pass.cc \
graph/passes/transpose_transdata_pass.cc \
graph/passes/hccl_memcpy_pass.cc \
graph/passes/hccl_continuous_memcpy_pass.cc \
graph/passes/flow_ctrl_pass.cc \
graph/passes/global_step_insert_pass.cc \
graph/passes/link_gen_mask_nodes_pass.cc \


+ 1
- 0
ge/ge_runner.mk View File

@@ -183,6 +183,7 @@ LIBGE_LOCAL_SRC_FILES := \
graph/passes/get_original_format_pass.cc \
graph/passes/guarantee_const_pass.cc \
graph/passes/hccl_memcpy_pass.cc \
graph/passes/hccl_continuous_memcpy_pass.cc \
graph/passes/identity_pass.cc \
graph/passes/ref_identity_delete_op_pass.cc \
graph/passes/infershape_pass.cc \


+ 6
- 1
ge/generator/ge_generator.cc View File

@@ -47,6 +47,7 @@ const char *const kEngineNameDefault = "default";
const char *const kVectorEngine = "VectorEngine";
const char *const kAIcoreEngine = "AIcoreEngine";
const char *const kFileNameSuffix = "online";
const char *const kAicpuAllshape = "_AllShape";
const size_t kDynamicDimSize = 1;
const int64_t kDynamicDimValue = -2;

@@ -721,8 +722,12 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
GeModelPtr &ge_model = name_to_ge_model.begin()->second;
GELOGD("The opType in op_desc_tmp is [%s]", op_desc_tmp->GetType().c_str());

bool all_shape = false;
bool dynamic_flag = false;
if (CheckShapeReset(op_desc, dynamic_flag) == SUCCESS && dynamic_flag) {
(void)AttrUtils::GetBool(op_desc, kAicpuAllshape, all_shape);
CheckShapeReset(op_desc, dynamic_flag);
if (dynamic_flag || all_shape) {
GELOGD("Get aicpu all_shape kernel!");
vector<GeTensor> inputs_dynamic;
vector<GeTensor> outputs_dynamic;
GE_CHK_STATUS_RET_NOLOG(ResetTensorVecShape(inputs, inputs_dynamic));


+ 21
- 0
ge/graph/build/memory/graph_mem_assigner.cc View File

@@ -88,6 +88,14 @@ Status VariableMemoryAssigner::AssignVarAttr2Nodes() {
return ge::SUCCESS;
}

// Assign memory for nodes carrying a ref attribute by delegating to VarMemAssignUtil.
// Returns whatever status the delegate produces (SUCCESS on success).
Status VariableMemoryAssigner::AssignMemory2HasRefAttrNode() {
  return ge::VarMemAssignUtil::AssignMemory2HasRefAttrNode(compute_graph_);
}

Status GraphMemoryAssigner::AssignMemory() {
ge::HybridMemAssignerPtr mem_assigner(new(std::nothrow) HybridMemAssigner(compute_graph_));
if (mem_assigner->Assign() != ge::SUCCESS) {
@@ -135,6 +143,19 @@ ge::Status GraphMemoryAssigner::AssignVarAttr2Nodes() {
return ge::SUCCESS;
}

// Create a VariableMemoryAssigner for this graph and run ref-attr memory assignment through it.
// Any delegate failure is normalized to ge::FAILED.
ge::Status GraphMemoryAssigner::AssignMemory2HasRefAttrNode() {
  std::unique_ptr<ge::VariableMemoryAssigner> variable_assigner(
      new(std::nothrow) ge::VariableMemoryAssigner(compute_graph_));
  if (variable_assigner == nullptr) {
    GELOGE(ge::FAILED, "Alloc VariableMemoryAssigner failed.");
    return ge::FAILED;
  }
  return (variable_assigner->AssignMemory2HasRefAttrNode() == ge::SUCCESS) ? ge::SUCCESS : ge::FAILED;
}

ge::Status CalculateTensorRealSizeAndOutSize(const ge::ConstGeTensorDescPtr &output_desc,
int64_t dim_index, int64_t &output_mem_size,
int64_t &batch_dim_num, int64_t &out_size) {


+ 4
- 0
ge/graph/build/memory/graph_mem_assigner.h View File

@@ -63,6 +63,8 @@ class VariableMemoryAssigner {
///
ge::Status AssignVarAttr2Nodes();

ge::Status AssignMemory2HasRefAttrNode();

private:
ge::ComputeGraphPtr compute_graph_;
};
@@ -99,6 +101,8 @@ class GraphMemoryAssigner {
///
ge::Status AssignVarAttr2Nodes();

ge::Status AssignMemory2HasRefAttrNode();

ge::Status ReAssignMemory(bool is_loop_graph, map<int64_t, size_t> &mem_type_to_offset);

ge::Status AssignZeroCopyMemory(map<int64_t, size_t> &mem_offset, size_t &zero_mem_copy_size);


+ 5
- 0
ge/graph/build/memory/memory_assigner.cc View File

@@ -40,6 +40,11 @@ Status MemoryAssigner::AssignMemory(bool is_loop_graph, map<int64_t, size_t> &me
return ge::FAILED;
}

if (graph_mem_assigner.AssignMemory2HasRefAttrNode() != ge::SUCCESS) {
GELOGE(ge::FAILED, "Assign memory to node which has ref attr failed!");
return ge::FAILED;
}

// Assign memory for reference
if (graph_mem_assigner.AssignReferenceMemory() != ge::SUCCESS) {
GELOGE(ge::FAILED, "Assign reference memory failed!");


+ 1
- 4
ge/graph/build/memory/var_mem_assign_util.cc View File

@@ -33,10 +33,7 @@ using std::vector;

namespace ge {
Status VarMemAssignUtil::AssignVarMemory(ge::ComputeGraphPtr &compute_graph) {
GE_CHK_STATUS_RET(AssignMemory2VariableNode(compute_graph));
GE_CHK_STATUS_RET(AssignMemory2HasRefAttrNode(compute_graph));

return SUCCESS;
return AssignMemory2VariableNode(compute_graph);
}

Status VarMemAssignUtil::AssignConstantOpMemory(ge::ComputeGraphPtr &compute_graph) {


+ 59
- 66
ge/graph/load/model_manager/davinci_model.cc View File

@@ -446,23 +446,20 @@ void DavinciModel::InitRuntimeParams() {
runtime_param_.mem_size, runtime_param_.weight_size, runtime_param_.var_size);
}

void DavinciModel::CheckHasHcomOp() {
Graph graph = ge_model_->GetGraph();
auto compute_graph = GraphUtils::GetComputeGraph(graph);
if (compute_graph == nullptr) {
return;
}
void DavinciModel::CheckHasHcomOp(const ComputeGraphPtr &compute_graph) {
const set<string> hcom_opp_types({
HCOMBROADCAST, HCOMALLGATHER, HCOMALLREDUCE, HCOMSEND, HCOMRECEIVE, HCOMREDUCESCATTER,
HVDCALLBACKALLREDUCE, HVDCALLBACKALLGATHER, HVDCALLBACKBROADCAST, HVDWAIT, HCOMREDUCE
});
for (const auto &node : compute_graph->GetAllNodes()) {
OpDescPtr op_desc = node->GetOpDesc();
GE_IF_BOOL_EXEC(op_desc == nullptr, GELOGW("Node OpDesc is nullptr"); continue);
GE_IF_BOOL_EXEC(((op_desc->GetType() == HCOMBROADCAST) || (op_desc->GetType() == HCOMALLGATHER) ||
(op_desc->GetType() == HCOMALLREDUCE) || (op_desc->GetType() == HCOMSEND) ||
(op_desc->GetType() == HCOMRECEIVE) || (op_desc->GetType() == HCOMREDUCESCATTER) ||
(op_desc->GetType() == HVDCALLBACKALLREDUCE) || (op_desc->GetType() == HVDCALLBACKALLGATHER) ||
(op_desc->GetType() == HVDCALLBACKBROADCAST) || (op_desc->GetType() == HVDWAIT) ||
(op_desc->GetType() == HCOMREDUCE)),
uint32_t stream_id = static_cast<uint32_t>(op_desc->GetStreamId());
(void)hcom_streams_.emplace(stream_id); GELOGD("hcom stream: %u.", stream_id); continue);
if (hcom_opp_types.count(op_desc->GetType()) > 0) {
uint32_t stream_id = static_cast<uint32_t>(op_desc->GetStreamId());
hcom_streams_.emplace(stream_id);
GELOGD("hcom stream: %u.", stream_id);
}
}
}

@@ -642,7 +639,7 @@ Status DavinciModel::Init(void *dev_ptr, size_t mem_size, void *weight_ptr, size
name_ = ge_model_->GetName();
(void)ge::AttrUtils::GetBool(ge_model_, ATTR_NAME_SWITCH_FOR_L1_FUSION, is_l1_fusion_enable_);
GELOGD("The value of ge.l1Fusion in ge_model is %d.", is_l1_fusion_enable_);
CheckHasHcomOp();
CheckHasHcomOp(compute_graph);

vector<int64_t> huge_stream_list;
(void)ge::AttrUtils::GetListInt(ge_model_, ATTR_MODEL_HUGE_STREAM_LIST, huge_stream_list);
@@ -1028,7 +1025,7 @@ Status DavinciModel::GenInputOutputInfo(const map<uint32_t, OpDescPtr> &data_by_
const vector<OpDescPtr> &output_op_list) {
GELOGD("Data node size: %zu, NetOutput node size: %zu", data_by_index.size(), output_op_list.size());
for (auto &item : data_by_index) {
auto output_addrs = ModelUtils::GetOutputDataAddrs(runtime_param_, item.second);
const auto output_addrs = ModelUtils::GetOutputDataAddrs(runtime_param_, item.second);
GELOGD("Data node: %s, output addr size: %zu", item.second->GetName().c_str(), output_addrs.size());
input_addrs_list_.emplace_back(output_addrs);

@@ -1036,14 +1033,18 @@ Status DavinciModel::GenInputOutputInfo(const map<uint32_t, OpDescPtr> &data_by_
GE_CHK_STATUS_RET(InitAippType(item.first, item.second, data_by_index), "Init AIPP Type failed");
GE_CHK_STATUS_RET(InitOrigInputInfo(item.first, item.second), "Init Orig input failed");
GE_CHK_STATUS_RET(InitAippInputOutputDims(item.first, item.second), "Init AIPP dims failed");
GE_CHK_STATUS_RET(InitInputDescInfo(item.second), "Init input desc info failed");
if (item.second->GetType() == AIPP_DATA_TYPE) {
GELOGI("This is dynamic aipp model, Node: %s", item.second->GetName().c_str());
is_dynamic_aipp_ = true;
}
}

vector<string> out_node_name;
(void)AttrUtils::GetListStr(ge_model_, ATTR_MODEL_OUT_NODES_NAME, out_node_name);
GELOGD("Output node size: %zu, out nodes name: %zu", output_op_list.size(), out_node_name.size());
for (const auto &op_desc : output_op_list) {
auto input_addrs = ModelUtils::GetInputDataAddrs(runtime_param_, op_desc);
const auto input_addrs = ModelUtils::GetInputDataAddrs(runtime_param_, op_desc);
GELOGD("NetOutput node: %s, input addr size: %zu", op_desc->GetName().c_str(), input_addrs.size());
output_addrs_list_.emplace_back(input_addrs);

@@ -1061,10 +1062,11 @@ Status DavinciModel::GenInputOutputInfo(const map<uint32_t, OpDescPtr> &data_by_
if (InitOutputTensorInfo(op_desc) != SUCCESS) {
return INTERNAL_ERROR;
}

GE_CHK_STATUS_RET(InitOutputDescInfo(op_desc, out_node_name), "Init output desc info failed");
}

GE_CHK_STATUS_RET(InitInputDescInfo(data_by_index), "Init input desc info failed");
return InitOutputDescInfo(output_op_list);
return SUCCESS;
}

bool DavinciModel::IsGetNextSinkDynamic(const OpDescPtr &op_desc) {
@@ -1980,27 +1982,24 @@ void DavinciModel::CreateInputDimsInfo(const OpDescPtr &op_desc, Format format,
}
}

Status DavinciModel::InitInputDescInfo(const map<uint32_t, OpDescPtr> &data_by_index) {
for (const auto &item : data_by_index) {
const auto op_desc = item.second;
GE_CHECK_NOTNULL(op_desc->GetInputDescPtr(0));
Status DavinciModel::InitInputDescInfo(const OpDescPtr &op_desc) {
GE_CHECK_NOTNULL(op_desc->GetInputDescPtr(0));

InputOutputDescInfo input;
ShapeDescription dims_info;
Format format = op_desc->GetInputDescPtr(0)->GetFormat();
CreateInputDimsInfo(op_desc, format, input.shape_info, dims_info);
InputOutputDescInfo input;
ShapeDescription dims_info;
Format format = op_desc->GetInputDescPtr(0)->GetFormat();
CreateInputDimsInfo(op_desc, format, input.shape_info, dims_info);

input.data_type = op_desc->GetInputDescPtr(0)->GetDataType();
input.name = op_desc->GetName();
int64_t input_size = 0;
GE_CHK_STATUS_RET(TensorUtils::GetSize(*op_desc->GetInputDescPtr(0), input_size), "get input size failed.");
input.size = input_size;
input_formats_.push_back(format);
input_descs_.push_back(input);
input.data_type = op_desc->GetInputDescPtr(0)->GetDataType();
input.name = op_desc->GetName();
int64_t input_size = 0;
GE_CHK_STATUS_RET(TensorUtils::GetSize(*op_desc->GetInputDescPtr(0), input_size), "get input size failed.");
input.size = input_size;
input_formats_.push_back(format);
input_descs_.push_back(input);

input.shape_info = dims_info;
input_descs_dims_.push_back(input);
}
input.shape_info = dims_info;
input_descs_dims_.push_back(input);
return SUCCESS;
}

@@ -2066,37 +2065,31 @@ void DavinciModel::CreateOutput(uint32_t index, const OpDescPtr &op_desc, InputO
output.data_type = op_desc->GetInputDescPtr(index)->GetDataType();
}

Status DavinciModel::InitOutputDescInfo(const vector<OpDescPtr> &output_op_list) {
GELOGD("Output node size: %zu", output_op_list.size());
vector<string> out_node_name;
(void)ge::AttrUtils::GetListStr(ge_model_, ATTR_MODEL_OUT_NODES_NAME, out_node_name);
for (const auto &op_desc : output_op_list) {
uint32_t out_size = static_cast<uint32_t>(op_desc->GetInputsSize());
for (uint32_t index = 0; index < out_size; index++) {
string output_name;
InputOutputDescInfo output;
uint32_t format_result;
CreateOutput(index, op_desc, output, format_result);

std::vector<std::string> src_name = op_desc->GetSrcName();
std::vector<int64_t> src_index = op_desc->GetSrcIndex();
GE_CHK_BOOL_RET_STATUS(src_name.size() > index && src_index.size() > index, INTERNAL_ERROR,
"construct output_name failed.");
// forward compatbility, if old om has no out_node_name, need to return output follow origin way
if (out_size == out_node_name.size()) {
// neweast plan, the index will add to name during generate model.
bool contains_colon = out_node_name[index].find(":") != std::string::npos;
output_name =
contains_colon ? out_node_name[index] : out_node_name[index] + ":" + std::to_string(src_index[index]);
} else {
output_name = std::string("output_") + std::to_string(index) + "_" + src_name[index] + "_" +
std::to_string(src_index[index]);
}
output.name = output_name;
output_descs_.push_back(output);
output_formats_.push_back(format_result);
Status DavinciModel::InitOutputDescInfo(const OpDescPtr &op_desc, const vector<string> &out_node_name) {
uint32_t out_size = static_cast<uint32_t>(op_desc->GetInputsSize());
for (uint32_t i = 0; i < out_size; ++i) {
string output_name;
InputOutputDescInfo output;
uint32_t format_result;
CreateOutput(i, op_desc, output, format_result);

std::vector<std::string> src_name = op_desc->GetSrcName();
std::vector<int64_t> src_index = op_desc->GetSrcIndex();
GE_CHK_BOOL_RET_STATUS(src_name.size() > i && src_index.size() > i, INTERNAL_ERROR,
"construct output_name failed.");
// forward compatbility, if old om has no out_node_name, need to return output follow origin way
if (out_size == out_node_name.size()) {
// neweast plan, the index will add to name during generate model.
bool contains_colon = out_node_name[i].find(":") != std::string::npos;
output_name = contains_colon ? out_node_name[i] : out_node_name[i] + ":" + std::to_string(src_index[i]);
} else {
output_name = string("output_") + std::to_string(i) + "_" + src_name[i] + "_" + std::to_string(src_index[i]);
}
output.name = output_name;
output_descs_.push_back(output);
output_formats_.push_back(format_result);
}

return SUCCESS;
}



+ 3
- 3
ge/graph/load/model_manager/davinci_model.h View File

@@ -831,7 +831,7 @@ class DavinciModel {

void OpDebugUnRegister();

void CheckHasHcomOp();
void CheckHasHcomOp(const ComputeGraphPtr &graph);

Status DoTaskSink();

@@ -854,8 +854,8 @@ class DavinciModel {
Status InitOutputTensorInfo(const OpDescPtr &op_desc);
Status GenOutputTensorInfo(OutputData *output_data, vector<OutputTensorInfo> &outputs);

Status InitInputDescInfo(const map<uint32_t, OpDescPtr> &data_by_index);
Status InitOutputDescInfo(const vector<OpDescPtr> &output_op_list);
Status InitInputDescInfo(const OpDescPtr &op_desc);
Status InitOutputDescInfo(const OpDescPtr &op_desc, const vector<string> &out_node_name);

Status InitOrigInputInfo(uint32_t index, const OpDescPtr &op_desc);
Status InitAippInfo(uint32_t index, const OpDescPtr &op_desc);


+ 36
- 10
ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc View File

@@ -26,8 +26,42 @@
#include "graph/attr_value.h"
#include "graph/load/model_manager/davinci_model.h"
#include "graph/load/model_manager/model_manager.h"
#include "hybrid/node_executor/aicpu/aicpu_ext_info.h"
#include "framework/common/debug/log.h"

namespace ge {
// Parse the serialized aicpu kernel ext-info, patch its execute-mode flag, and copy the
// patched buffer to device memory (ext_info_addr_). No-op (SUCCESS) when ext_info is empty.
// NOTE(review): ext_info_addr_ allocated here via rtMalloc — presumably released by the owning
// task info's cleanup path; confirm to rule out a leak on the rtMemcpy failure branch.
Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDescPtr &op_desc) {
  if (ext_info.empty()) {
    return SUCCESS;
  }
  // Unknown-shape type is optional on the op; defaults to 0 when the attribute is absent.
  int32_t unknown_shape_type_val = 0;
  (void) AttrUtils::GetInt(op_desc, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, unknown_shape_type_val);
  UnknowShapeOpType unknown_type = static_cast<UnknowShapeOpType>(unknown_shape_type_val);
  // NOTE(review): GetInputsSize/GetOutputsSize narrow into uint32_t here — assumed to fit.
  uint32_t num_inputs = op_desc->GetInputsSize();
  uint32_t num_outputs = op_desc->GetOutputsSize();
  std::unique_ptr<ge::hybrid::AicpuExtInfoHandler> ext_handle(
      new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc->GetName(),
                                                          num_inputs,
                                                          num_outputs,
                                                          unknown_type));
  GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!");
  GE_CHK_STATUS_RET(ext_handle->Parse(ext_info),
                    "Parse kernel ext info failed, kernel_ext_info_size=%zu.", ext_info.size());
  // Mark the ext-info bit map so the aicpu kernel runs in (sink) execute mode 1.
  GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed.");
  GELOGD("Update aicpu_task ext_info bit_map execute mode to 1.");

  // Device buffer is sized by the handler's (possibly patched) length, not the raw string.
  // NOTE(review): the error logs below report ext_info.size() although the allocation/copy
  // use ext_handle->GetExtInfoLen() — the logged size can be misleading when they differ.
  auto rt_ret = rtMalloc(&ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
  GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
                  GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
                  return RT_ERROR_TO_GE_STATUS(rt_ret);)
  rt_ret = rtMemcpy(ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle->GetExtInfo(),
                    ext_handle->GetExtInfoLen(), RT_MEMCPY_HOST_TO_DEVICE);
  GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
                  GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
                  return RT_ERROR_TO_GE_STATUS(rt_ret);)
  return SUCCESS;
}

Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GELOGI("KernelExTaskInfo Init Start.");
GE_CHECK_NOTNULL(davinci_model);
@@ -63,16 +97,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
}

const auto &ext_info = kernel_ex_def.kernel_ext_info();
if (!ext_info.empty()) {
auto rt_ret = rtMalloc(&ext_info_addr_, ext_info.size(), RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtMemcpy(ext_info_addr_, ext_info.size(), ext_info.c_str(), ext_info.size(), RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);)
}
GE_CHK_STATUS_RET(InitTaskExtInfo(ext_info, op_desc),
"Init aicpu tf_task ext info failed, ext_info size=%zu", ext_info.size());

GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), ext_info.size(), ext_info_addr_);


+ 1
- 0
ge/graph/load/model_manager/task_info/kernel_ex_task_info.h View File

@@ -62,6 +62,7 @@ class KernelExTaskInfo : public TaskInfo {
void SetIoAddrs(const OpDescPtr &op_desc);

void InitDumpTask(void *addr, const OpDescPtr &op_desc);
Status InitTaskExtInfo(const std::string &ext_info, const OpDescPtr &op_desc);

uint32_t task_id_;
uint32_t stream_id_;


+ 24
- 29
ge/graph/load/model_manager/task_info/kernel_task_info.cc View File

@@ -32,6 +32,8 @@
#include "super_kernel/super_kernel.h"
#include "super_kernel/super_kernel_factory.h"
#include "cce/aicpu_engine_struct.h"
#include "hybrid/node_executor/aicpu/aicpu_ext_info.h"
#include "framework/common/debug/log.h"

namespace {
const uint8_t kL2LoadToDdr = 1;
@@ -964,39 +966,32 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
return SUCCESS;
}

std::unique_ptr<uint8_t[]> copy_ext_info;
copy_ext_info.reset(new(std::nothrow)uint8_t[ext_info.size()]);
GE_CHECK_NOTNULL(copy_ext_info);
auto sec_ret = memcpy_s(copy_ext_info.get(), ext_info.size(), ext_info.c_str(), ext_info.size());
if (sec_ret != EOK) {
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
return FAILED;
}

auto ext_info_data = copy_ext_info.get();
size_t offset = 0;
while (offset + sizeof(aicpu::FWKAdapter::ExtInfo) <= ext_info.size()) {
auto aicpu_ext_info = reinterpret_cast<aicpu::FWKAdapter::ExtInfo *>(ext_info_data + offset);
GELOGD("Ext infoType=%d, infoLen=%u.", aicpu_ext_info->infoType, aicpu_ext_info->infoLen);
if (aicpu_ext_info->infoType == aicpu::FWKAdapter::FWK_ADPT_EXT_SESSION_INFO) {
GE_CHK_BOOL_RET_STATUS(aicpu_ext_info->infoLen == sizeof(SessionInfo), PARAM_INVALID,
"Parse ext session info failed as infoLen must be %zu but %u.",
sizeof(SessionInfo), aicpu_ext_info->infoLen);
SessionInfo *session_info = reinterpret_cast<SessionInfo *>(aicpu_ext_info->infoMsg);
session_info->sessionId = davinci_model_->GetSessionId();
session_info->sessFlag = true;
GELOGD("Update aicpu_task ext_info session_info session_id is %lu", session_info->sessionId);
}
offset += sizeof(aicpu::FWKAdapter::ExtInfo);
offset += aicpu_ext_info->infoLen;
}

auto rt_ret = rtMalloc(&aicpu_ext_info_addr_, ext_info.size(), RT_MEMORY_HBM);
int32_t unknown_shape_type_val = 0;
(void) AttrUtils::GetInt(op_desc_, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, unknown_shape_type_val);
UnknowShapeOpType unknown_type = static_cast<UnknowShapeOpType>(unknown_shape_type_val);
uint32_t num_inputs = op_desc_->GetInputsSize();
uint32_t num_outputs = op_desc_->GetOutputsSize();
std::unique_ptr<ge::hybrid::AicpuExtInfoHandler> ext_handle(
new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc_->GetName(),
num_inputs,
num_outputs,
unknown_type));
GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!");
GE_CHK_STATUS_RET(ext_handle->Parse(ext_info),
"Parse kernel ext info failed, kernel_ext_info_size=%zu.", ext_info.size());
GE_CHK_STATUS_RET(ext_handle->UpdateSessionInfoSessionId(davinci_model_->GetSessionId()),
"Update session info session id failed.");
GELOGD("Update aicpu_task ext_info session_info session_id is %lu", davinci_model_->GetSessionId());
GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed.");
GELOGD("Update aicpu_task ext_info bit_map execute mode to 1.");

auto rt_ret = rtMalloc(&aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
rt_ret = rtMemcpy(aicpu_ext_info_addr_, ext_info.size(), ext_info_data, ext_info.size(), RT_MEMCPY_HOST_TO_DEVICE);
rt_ret = rtMemcpy(aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle->GetExtInfo(),
ext_handle->GetExtInfoLen(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);


+ 4
- 3
ge/graph/manager/graph_manager.cc View File

@@ -92,7 +92,7 @@
#include "graph/passes/unused_args_clean_pass.h"
#include "graph/passes/global_step_insert_pass.h"
#include "graph/passes/memcpy_addr_async_pass.h"
#include "graph/passes/hccl_memcpy_pass.h"
#include "graph/passes/hccl_continuous_memcpy_pass.h"
#include "graph/build/label_allocator.h"
#include "graph/utils/tensor_adapter.h"
#include "inc/pass_manager.h"
@@ -2151,8 +2151,6 @@ Status GraphManager::OptimizeStage1(ge::ComputeGraphPtr &compute_graph) {
new (std::nothrow) TransOpWithoutReshapeFusionPass))
GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::TransOpBreadthFusionPass",
new (std::nothrow) TransOpBreadthFusionPass))
GE_CHK_STATUS_RET(
after_merge_passes.AddPass("OptimizeStage1_1::HcclMemcpyPass", new (std::nothrow) HcclMemcpyPass));

GE_TIMESTAMP_START(after_merge_passes);
auto ret = after_merge_passes.Run(compute_graph);
@@ -2268,6 +2266,9 @@ Status GraphManager::OptimizeStage2(ge::ComputeGraphPtr &compute_graph) {
GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage2::AfterMergePasses::LinkGenMaskNodesPass",
new (std::nothrow)
LinkGenMaskNodesPass(options_.stream_max_parallel_num)));
GE_CHK_STATUS_RET(
after_merge_passes.AddPass("OptimizeStage2::HcclContinuousMemcpyPass",
new (std::nothrow) HcclContinuousMemcpyPass));

GE_TIMESTAMP_START(after_merge_passes);
auto ret = after_merge_passes.Run(compute_graph);


+ 411
- 0
ge/graph/passes/hccl_continuous_memcpy_pass.cc View File

@@ -0,0 +1,411 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "graph/passes/hccl_continuous_memcpy_pass.h"

#include <string>

#include "common/debug/log.h"
#include "framework/common/debug/ge_log.h"
#include "common/ge_inner_error_codes.h"
#include "common/ge/ge_util.h"
#include "framework/common/types.h"
#include "graph/utils/graph_utils.h"

namespace {
const int kAnchorNum = 0;
const int32_t kAnchorAssignRefIndex = 0;
const int32_t kAnchorAssignValueIndex = 1;
} // namespace
namespace ge {
// Entry point of the pass: for every direct node of the graph, insert memcpy (identity)
// nodes where hccl ops need continuous input memory or P2P input memory.
Status HcclContinuousMemcpyPass::Run(ge::ComputeGraphPtr graph) {
  GE_CHECK_NOTNULL(graph);
  for (const auto &node : graph->GetDirectNode()) {
    if (node->GetOpDesc() == nullptr) {
      GELOGE(INTERNAL_ERROR, "node has no op_desc, node_name : %s.", node->GetName().c_str());
      return INTERNAL_ERROR;
    }

    // Handle inputs that must be allocated as one continuous buffer.
    Status ret = ContinuousInputProcess(graph, node);
    if (ret != SUCCESS) {
      GELOGE(INTERNAL_ERROR, "failed ProcessBroadcastMemcpy, node_name:%s.", node->GetName().c_str());
      return ret;
    }

    // Handle inputs that are required to live in P2P device memory.
    ret = P2pmemInputProcess(graph, node);
    if (ret != SUCCESS) {
      GELOGE(INTERNAL_ERROR, "failed P2pmemInputProcess, node_name:%s.", node->GetName().c_str());
      return ret;
    }
  }
  return SUCCESS;
}

// Broadcast-style hccl ops with more than one input require their inputs to be continuous in
// memory; when such an input comes straight from a variable/const/data node, a fresh feature-map
// buffer is allocated and data would be moved there every step outside the model. Inserting an
// explicit memcpy (identity) node keeps that move inside the model.
Status HcclContinuousMemcpyPass::ContinuousInputProcess(const ComputeGraphPtr &graph, const NodePtr node) {
  const auto op_desc = node->GetOpDesc();

  bool is_input_continuous = false;
  (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_CONTINUOUS_INPUT, is_input_continuous);
  // Only multi-input continuous ops need the rewrite.
  if (!is_input_continuous || op_desc->GetInputsSize() <= 1) {
    return SUCCESS;
  }

  GELOGI("continuous input op is:%s.", op_desc->GetName().c_str());
  for (auto &hccl_in_anchor : node->GetAllInDataAnchors()) {
    if (hccl_in_anchor == nullptr) {
      continue;
    }
    auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor();
    if (src_out_anchor == nullptr) {
      GELOGE(INTERNAL_ERROR, "hcom op input has no peer anchor, node_name:%s", node->GetName().c_str());
      return INTERNAL_ERROR;
    }
    // Only variable-like producers need an interposed memcpy.
    if (!IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) {
      continue;
    }
    Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor);
    if (ret != SUCCESS) {
      GELOGE(INTERNAL_ERROR, "Failed to modify the connection.");
      return ret;
    }
  }
  return SUCCESS;
}

// When an input slot of the node is declared to live in P2P DDR memory but is fed directly by a
// variable/const/data node, interpose a memcpy (identity) between producer and consumer.
Status HcclContinuousMemcpyPass::P2pmemInputProcess(const ComputeGraphPtr &graph, const NodePtr node) {
  const auto op_desc = node->GetOpDesc();

  vector<int64_t> input_memory_types;
  (void) ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_INPUT_MEM_TYPE_LIST, input_memory_types);
  if (input_memory_types.empty()) {
    return SUCCESS;
  }

  // Walk the per-input memory-type list in lockstep with the op's inputs.
  for (uint32_t index = 0; index < input_memory_types.size() && index < op_desc->GetInputsSize(); index++) {
    if (input_memory_types[index] != RT_MEMORY_P2P_DDR) {
      continue;
    }

    GELOGD("p2p input op is:%s.", op_desc->GetName().c_str());
    auto hccl_in_anchor = node->GetInDataAnchor(index);
    if (hccl_in_anchor == nullptr) {
      continue;
    }
    auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor();
    if (src_out_anchor == nullptr) {
      GELOGE(INTERNAL_ERROR, "hcom op input has no peer anchor, node_name:%s", node->GetName().c_str());
      return INTERNAL_ERROR;
    }
    // Only variable-like producers need an interposed memcpy.
    if (!IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) {
      continue;
    }
    Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor);
    if (ret != SUCCESS) {
      GELOGE(INTERNAL_ERROR, "Failed to modify the connection.");
      return ret;
    }
  }
  return SUCCESS;
}

// True for node types whose output comes from variable/constant/data storage — the producers
// in front of which this pass inserts a memcpy.
bool HcclContinuousMemcpyPass::IsDataNode(const std::string& node_type) {
  return (node_type == CONSTANTOP) || (node_type == CONSTANT) ||
         (node_type == VARIABLE) || (node_type == DATA);
}

///
/// @brief Add an Identity node that copies the output of the given anchor; used as an explicit
///        memcpy so hccl ops get their own continuous / P2P input buffer.
/// @param [in] ge::ComputeGraphPtr graph
/// @param [in] ge::OutDataAnchorPtr out_data_anchor : producer output to copy from
/// @return ge::NodePtr : the created Identity node, or nullptr on any failure
///
NodePtr HcclContinuousMemcpyPass::CreateIdentityNode(const ComputeGraphPtr &graph,
                                                     const OutDataAnchorPtr &out_data_anchor) {
  GE_CHECK_NOTNULL_EXEC(graph, return nullptr);
  NodePtr pre_node = out_data_anchor->GetOwnerNode();
  OpDescPtr pre_op_desc = pre_node->GetOpDesc();
  if (pre_op_desc == nullptr) {
    GELOGE(INTERNAL_ERROR, "OpDesc of pre node is invalid.");
    return nullptr;
  }

  // Base the name on the producer and de-duplicate it within this pass run.
  std::string node_name = pre_node->GetName() + "_" + IDENTITY;
  node_name = CheckDuplicateName(node_name);
  // Pass the std::string directly; the previous .c_str() forced a needless char*->string copy.
  OpDescPtr op_desc = MakeShared<OpDesc>(node_name, IDENTITY);
  if (op_desc == nullptr) {
    GELOGE(INTERNAL_ERROR, "Create Identity op: MakeShared op_desc fail.");
    return nullptr;
  }
  GELOGI("Create Identity op:%s.", op_desc->GetName().c_str());

  // Input and output of the Identity both mirror the producer's output tensor desc.
  graphStatus ret = op_desc->AddInputDesc("x", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  if (ret != GRAPH_SUCCESS) {
    GELOGE(INTERNAL_ERROR, "Create Identity op: add input desc fail.");
    return nullptr;
  }

  ret = op_desc->AddOutputDesc("y", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  if (ret != GRAPH_SUCCESS) {
    GELOGE(INTERNAL_ERROR, "Create Identity op: add output desc fail.");
    return nullptr;
  }
  // Because of history reasons this pass cannot run after constant folding, so mark the node
  // to keep it from being folded away by later passes.
  (void)AttrUtils::SetBool(op_desc, ATTR_NO_NEED_CONSTANT_FOLDING, false);

  NodePtr memcpy_node = graph->AddNode(op_desc);
  if (memcpy_node == nullptr) {
    GELOGE(INTERNAL_ERROR, "Insert Identity node fail.");
    return nullptr;
  }

  return memcpy_node;
}

///
/// @brief Check duplicate node_name
/// @param [in] std::string& node_name
/// @return std::string
///
std::string HcclContinuousMemcpyPass::CheckDuplicateName(const std::string &node_name) {
  // Single-lookup insert: on first sight the name is registered with counter 1
  // and returned as-is; on later hits the current counter is appended and bumped.
  auto insert_result = node_num_map_.emplace(node_name, 1);
  if (insert_result.second) {
    return node_name;
  }
  uint32_t &seen_count = insert_result.first->second;
  std::string unique_name = node_name + "_" + std::to_string(seen_count);
  ++seen_count;
  return unique_name;
}

///
/// @brief Modify edge connection
/// @param [in] ComputeGraphPtr graph
/// @param [in] OutDataAnchorPtr src_out_anchor
/// @param [in] InDataAnchorPtr hccl_in_anchor
/// @return status
///
Status HcclContinuousMemcpyPass::ModifyEdgeConnection(const ComputeGraphPtr &graph, const OutDataAnchorPtr &src_out_anchor,
                                                      const InDataAnchorPtr &hccl_in_anchor) {
  GE_CHECK_NOTNULL(src_out_anchor->GetOwnerNode());
  GE_CHECK_NOTNULL(hccl_in_anchor->GetOwnerNode());

  const std::string &src_name = src_out_anchor->GetOwnerNode()->GetName();
  const std::string &hccl_name = hccl_in_anchor->GetOwnerNode()->GetName();

  // First decouple the data edge with an Identity node ...
  Status status = InsertIdentityBeforeHccl(graph, src_out_anchor, hccl_in_anchor);
  if (status != SUCCESS) {
    GELOGE(INTERNAL_ERROR, "add identity failed, var_node:%s, hccl_node:%s.", src_name.c_str(), hccl_name.c_str());
    return status;
  }

  // ... then, for broadcast-over-variable, write the result back via Assign.
  status = InsertAssignAfterBroadcastIfNeed(graph, src_out_anchor, hccl_in_anchor);
  if (status != SUCCESS) {
    GELOGE(INTERNAL_ERROR, "add assign failed, var_node:%s, hccl_node:%s.", src_name.c_str(), hccl_name.c_str());
    return status;
  }
  return SUCCESS;
}

///
/// @brief Insert Identity node Between Hccl node and variable
/// @param [in] ComputeGraphPtr graph
/// @param [in] OutDataAnchorPtr src_out_anchor
/// @param [in] InDataAnchorPtr hccl_in_anchor
/// @return status
///
Status HcclContinuousMemcpyPass::InsertIdentityBeforeHccl(const ComputeGraphPtr &graph, const OutDataAnchorPtr &src_out_anchor,
                                                          const InDataAnchorPtr &hccl_in_anchor) {
  GELOGI("Between op %s and op %s need insert memcpy async op.", src_out_anchor->GetOwnerNode()->GetName().c_str(),
         hccl_in_anchor->GetOwnerNode()->GetName().c_str());
  NodePtr identity_node = CreateIdentityNode(graph, src_out_anchor);
  GE_CHECK_NOTNULL(identity_node);

  // Detach the direct src -> hccl edge before rerouting it through Identity.
  if (src_out_anchor->Unlink(hccl_in_anchor) != SUCCESS) {
    GELOGE(INTERNAL_ERROR, "The op %s Unlink anchor %s fail.", src_out_anchor->GetOwnerNode()->GetName().c_str(),
           hccl_in_anchor->GetOwnerNode()->GetName().c_str());
    return FAILED;
  }

  // Connect identity output -> hccl input.
  auto identity_out_anchor = identity_node->GetOutDataAnchor(kAnchorNum);
  GE_CHECK_NOTNULL(identity_out_anchor);
  if (identity_out_anchor->LinkTo(hccl_in_anchor) != SUCCESS) {
    GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", identity_node->GetName().c_str(),
           hccl_in_anchor->GetOwnerNode()->GetName().c_str());
    return FAILED;
  }

  // Connect src output -> identity input.
  if (src_out_anchor->LinkTo(identity_node->GetInDataAnchor(kAnchorNum)) != SUCCESS) {
    GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", src_out_anchor->GetOwnerNode()->GetName().c_str(),
           identity_node->GetName().c_str());
    return FAILED;
  }
  return SUCCESS;
}

///
/// @brief Insert assign node after broadcast node and variable to refresh variable data
/// @param [in] ComputeGraphPtr graph
/// @param [in] OutDataAnchorPtr var_out_anchor
/// @param [in] InDataAnchorPtr hccl_in_anchor
/// @return status
///
Status HcclContinuousMemcpyPass::InsertAssignAfterBroadcastIfNeed(const ComputeGraphPtr &graph,
                                                                  const OutDataAnchorPtr &var_out_anchor,
                                                                  const InDataAnchorPtr &hccl_in_anchor) {
  // Only the HcomBroadcast-over-Variable pattern needs a write-back Assign.
  if (hccl_in_anchor->GetOwnerNode()->GetType() != HCOMBROADCAST) {
    GELOGD("%s not broadcast, no need to insert assign node", hccl_in_anchor->GetOwnerNode()->GetName().c_str());
    return SUCCESS;
  }

  if (var_out_anchor->GetOwnerNode()->GetType() != VARIABLE) {
    GELOGD("%s not variable, no need to insert assign node", var_out_anchor->GetOwnerNode()->GetName().c_str());
    return SUCCESS;
  }

  GELOGI("after op %s and op %s need insert assign op.", var_out_anchor->GetOwnerNode()->GetName().c_str(),
         hccl_in_anchor->GetOwnerNode()->GetName().c_str());

  // If the variable already feeds an Assign, assume the write-back exists.
  for (auto peer_in_anchor : var_out_anchor->GetPeerInDataAnchors()) {
    if (peer_in_anchor->GetOwnerNode()->GetType() == ASSIGN) {
      GELOGD("variable %s out assign node is exist.", var_out_anchor->GetOwnerNode()->GetName().c_str());
      return SUCCESS;
    }
  }

  NodePtr assign_node = CreateAssignNode(graph, var_out_anchor);
  GE_CHECK_NOTNULL(assign_node);

  // Broadcast output index mirrors the input index it refreshes.
  OutDataAnchorPtr hccl_out_anchor = hccl_in_anchor->GetOwnerNode()->GetOutDataAnchor(hccl_in_anchor->GetIdx());
  GE_CHECK_NOTNULL(hccl_out_anchor);

  // Assign.value <- broadcast result.
  Status ret = hccl_out_anchor->LinkTo(assign_node->GetInDataAnchor(kAnchorAssignValueIndex));
  if (ret != SUCCESS) {
    GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", hccl_out_anchor->GetOwnerNode()->GetName().c_str(),
           assign_node->GetName().c_str());
    return FAILED;
  }

  // Assign.ref <- variable.
  ret = var_out_anchor->LinkTo(assign_node->GetInDataAnchor(kAnchorAssignRefIndex));
  if (ret != SUCCESS) {
    GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", var_out_anchor->GetOwnerNode()->GetName().c_str(),
           assign_node->GetName().c_str());
    return FAILED;
  }

  // add control edge between assign node and node after broadcast node
  OutControlAnchorPtr assign_out_control_anchor = assign_node->GetOutControlAnchor();
  GE_CHECK_NOTNULL(assign_out_control_anchor);

  for (auto in_data_anchor : hccl_out_anchor->GetPeerInDataAnchors()) {
    if (in_data_anchor->GetOwnerNode()->GetName() == assign_node->GetName()) {
      continue;
    }
    ret = assign_out_control_anchor->LinkTo(in_data_anchor->GetOwnerNode()->GetInControlAnchor());
    if (ret != SUCCESS) {
      GELOGE(INTERNAL_ERROR, "The op %s link control anchor %s fail.",
             assign_out_control_anchor->GetOwnerNode()->GetName().c_str(),
             in_data_anchor->GetOwnerNode()->GetName().c_str());
      return FAILED;
    }
  }

  // Guard against a broadcast node without an out control anchor before
  // iterating its control successors (was previously dereferenced unchecked).
  OutControlAnchorPtr hccl_out_control_anchor = hccl_out_anchor->GetOwnerNode()->GetOutControlAnchor();
  GE_CHECK_NOTNULL(hccl_out_control_anchor);
  for (auto in_control_anchor : hccl_out_control_anchor->GetPeerInControlAnchors()) {
    if (in_control_anchor->GetOwnerNode()->GetName() == assign_node->GetName()) {
      continue;
    }
    ret = assign_out_control_anchor->LinkTo(in_control_anchor);
    if (ret != SUCCESS) {
      GELOGE(INTERNAL_ERROR, "The op %s link control anchor %s fail.",
             assign_out_control_anchor->GetOwnerNode()->GetName().c_str(),
             in_control_anchor->GetOwnerNode()->GetName().c_str());
      return FAILED;
    }
  }
  return SUCCESS;
}

///
/// @brief create assign Node, add to graph
/// @param [in] ge::ComputeGraphPtr graph
/// @param [in] ge::OutDataAnchorPtr variable node out anchor
/// @return ge::NodePtr
///
NodePtr HcclContinuousMemcpyPass::CreateAssignNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor) {
  GE_CHECK_NOTNULL_EXEC(graph, return nullptr);
  NodePtr pre_node = out_data_anchor->GetOwnerNode();
  OpDescPtr pre_op_desc = pre_node->GetOpDesc();
  if (pre_op_desc == nullptr) {
    GELOGE(INTERNAL_ERROR, "OpDesc of pre node is invalid.");
    return nullptr;
  }

  std::string node_name = pre_node->GetName() + "_" + ASSIGN;
  node_name = CheckDuplicateName(node_name);
  OpDescPtr op_desc = MakeShared<OpDesc>(node_name.c_str(), ASSIGN);
  if (op_desc == nullptr) {
    GELOGE(INTERNAL_ERROR, "Create Assign op: MakeShared op_desc fail.");
    return nullptr;
  }
  GELOGI("Create Assign op:%s.", op_desc->GetName().c_str());

  // Assign's ref/value inputs and ref output all share the variable output's tensor desc.
  graphStatus ret = op_desc->AddInputDesc("ref", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  if (ret != GRAPH_SUCCESS) {
    GELOGE(INTERNAL_ERROR, "Create Assign op: add ref input desc fail.");
    return nullptr;
  }

  ret = op_desc->AddInputDesc("value", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  if (ret != GRAPH_SUCCESS) {
    GELOGE(INTERNAL_ERROR, "Create Assign op: add value input desc fail.");
    return nullptr;
  }

  ret = op_desc->AddOutputDesc("ref", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  if (ret != GRAPH_SUCCESS) {
    GELOGE(INTERNAL_ERROR, "Create Assign op: add output desc fail.");
    return nullptr;
  }

  NodePtr assign_node = graph->AddNode(op_desc);
  if (assign_node == nullptr) {
    // Fixed copy-paste error: this path creates an Assign node, not an Identity node.
    GELOGE(INTERNAL_ERROR, "Insert Assign node fail.");
    return nullptr;
  }

  return assign_node;
}


///
/// @brief Clear Status, used for subgraph pass
/// @return SUCCESS
///
Status HcclContinuousMemcpyPass::ClearStatus() {
  // Drop all per-graph name counters so the pass can be reused on another (sub)graph.
  node_num_map_ = {};
  return SUCCESS;
}
} // namespace ge

+ 59
- 0
ge/graph/passes/hccl_continuous_memcpy_pass.h View File

@@ -0,0 +1,59 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef GE_GRAPH_PASSES_HCCL_CONTINUOUS_MEMCPY_PASS_H_
#define GE_GRAPH_PASSES_HCCL_CONTINUOUS_MEMCPY_PASS_H_

#include <string>
#include <unordered_map>

#include "graph/graph.h"
#include "inc/graph_pass.h"

namespace ge {
class HcclContinuousMemcpyPass : public GraphPass {
public:
Status Run(ge::ComputeGraphPtr graph);
Status ClearStatus() override;

private:
NodePtr CreateIdentityNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor);

NodePtr CreateAssignNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor);

std::string CheckDuplicateName(const std::string &node_name);

Status ModifyEdgeConnection(const ComputeGraphPtr &graph, const OutDataAnchorPtr &src_out_anchor,
const InDataAnchorPtr &hccl_in_anchor);

Status InsertIdentityBeforeHccl(const ComputeGraphPtr &graph, const OutDataAnchorPtr &src_out_anchor,
const InDataAnchorPtr &hccl_in_anchor);

Status InsertAssignAfterBroadcastIfNeed(const ComputeGraphPtr &graph,
const OutDataAnchorPtr &src_out_anchor,
const InDataAnchorPtr &hccl_in_anchor);

Status ContinuousInputProcess(const ComputeGraphPtr &graph, const NodePtr node);

Status P2pmemInputProcess(const ComputeGraphPtr &graph, const NodePtr node);

bool IsDataNode(const std::string& node_type);

std::unordered_map<std::string, uint32_t> node_num_map_;
};
} // namespace ge

#endif // GE_GRAPH_PASSES_HCCL_CONTINUOUS_MEMCPY_PASS_H_

+ 4
- 91
ge/graph/passes/hccl_memcpy_pass.cc View File

@@ -34,7 +34,7 @@ const char *const kInputMutable = "_input_mutable";
} // namespace
namespace ge {
Status HcclMemcpyPass::Run(ge::ComputeGraphPtr graph) {
GE_IF_BOOL_EXEC(graph == nullptr, GELOGE(PARAM_INVALID, "param [graph] must not be null."); return PARAM_INVALID);
GE_CHECK_NOTNULL(graph);
for (const auto &node : graph->GetDirectNode()) {
auto op_desc = node->GetOpDesc();
if (op_desc == nullptr) {
@@ -42,24 +42,11 @@ Status HcclMemcpyPass::Run(ge::ComputeGraphPtr graph) {
return INTERNAL_ERROR;
}

Status ret = ContinuousInputProcess(graph, node);
if (ret != SUCCESS) {
GELOGE(INTERNAL_ERROR, "failed ProcessBroadcastMemcpy, node_name:%s.", node->GetName().c_str());
return ret;
}

ret = MutableInputProcess(graph, node);
Status ret = MutableInputProcess(graph, node);
if (ret != SUCCESS) {
GELOGE(INTERNAL_ERROR, "failed MutableInputProcess, node_name:%s.", node->GetName().c_str());
return ret;
}

ret = P2pmemInputProcess(graph, node);
if (ret != SUCCESS) {
GELOGE(INTERNAL_ERROR, "failed P2pmemInputProcess, node_name:%s.", node->GetName().c_str());
return ret;
}

}
return SUCCESS;
}
@@ -114,80 +101,6 @@ Status HcclMemcpyPass::MutableInputProcess(const ComputeGraphPtr &graph, const N
return SUCCESS;
}

// If broadcast input size is bigger than 1, and input from variable,
// cause by broadcast input memory should be continuous,
// another featuremap mem will be allocated for broadcast input.
// In this condition, move data from variable mem to broadcast input featuremap mem will be executed each step.
// In order to avoid move action out of model, use memcpy node instead of move action code.
Status HcclMemcpyPass::ContinuousInputProcess(const ComputeGraphPtr &graph, const NodePtr node) {
auto op_desc = node->GetOpDesc();

bool is_input_continuous = false;
(void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_CONTINUOUS_INPUT, is_input_continuous);

if (is_input_continuous && op_desc->GetInputsSize() > 1) {
GELOGI("continuous input op is:%s.", op_desc->GetName().c_str());
// if input size bigger than one, insert memcpy between var data for support continous mem alloc
for (auto &hccl_in_anchor : node->GetAllInDataAnchors()) {
if (hccl_in_anchor == nullptr) {
continue;
}
auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor();
if (src_out_anchor == nullptr) {
GELOGE(INTERNAL_ERROR, "hcom op input has no peer anchor, node_name:%s", node->GetName().c_str());
return INTERNAL_ERROR;
}

if (IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) {
Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor);
if (ret != SUCCESS) {
GELOGE(INTERNAL_ERROR, "Failed to modify the connection.");
return ret;
}
}
}
}
return SUCCESS;
}

// if input is var type, and node input need p2p mem, then memcpy should be insert between the two
Status HcclMemcpyPass::P2pmemInputProcess(const ComputeGraphPtr &graph, const NodePtr node) {
auto op_desc = node->GetOpDesc();

vector<int64_t> input_memory_types;
(void) ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_INPUT_MEM_TYPE_LIST, input_memory_types);

if (input_memory_types.empty()) {
return SUCCESS;
}

for (uint32_t index = 0; index < input_memory_types.size() && index < op_desc->GetInputsSize(); index++) {
if (input_memory_types[index] != RT_MEMORY_P2P_DDR) {
continue;
}

GELOGD("p2p input op is:%s.", op_desc->GetName().c_str());
auto hccl_in_anchor = node->GetInDataAnchor(index);
if (hccl_in_anchor == nullptr) {
continue;
}
auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor();
if (src_out_anchor == nullptr) {
GELOGE(INTERNAL_ERROR, "hcom op input has no peer anchor, node_name:%s", node->GetName().c_str());
return INTERNAL_ERROR;
}

if (IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) {
Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor);
if (ret != SUCCESS) {
GELOGE(INTERNAL_ERROR, "Failed to modify the connection.");
return ret;
}
}
}
return SUCCESS;
}

bool HcclMemcpyPass::IsDataNode(const std::string& node_type) {
return (node_type == CONSTANTOP) || (node_type == VARIABLE) || (node_type == DATA) || (node_type == CONSTANT);
}
@@ -199,7 +112,7 @@ bool HcclMemcpyPass::IsDataNode(const std::string& node_type) {
/// @return ge::NodePtr
///
NodePtr HcclMemcpyPass::CreateIdentityNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor) {
GE_IF_BOOL_EXEC(graph == nullptr, return nullptr);
GE_CHECK_NOTNULL_EXEC(graph, return nullptr);
NodePtr pre_node = out_data_anchor->GetOwnerNode();
OpDescPtr pre_op_desc = pre_node->GetOpDesc();
if (pre_op_desc == nullptr) {
@@ -413,7 +326,7 @@ Status HcclMemcpyPass::InsertAssignAfterBroadcastIfNeed(const ComputeGraphPtr &g
/// @return ge::NodePtr
///
NodePtr HcclMemcpyPass::CreateAssignNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor) {
GE_IF_BOOL_EXEC(graph == nullptr, return nullptr);
GE_CHECK_NOTNULL_EXEC(graph, return nullptr);
NodePtr pre_node = out_data_anchor->GetOwnerNode();
OpDescPtr pre_op_desc = pre_node->GetOpDesc();
if (pre_op_desc == nullptr) {


+ 0
- 4
ge/graph/passes/hccl_memcpy_pass.h View File

@@ -46,12 +46,8 @@ class HcclMemcpyPass : public GraphPass {
const OutDataAnchorPtr &src_out_anchor,
const InDataAnchorPtr &hccl_in_anchor);

Status ContinuousInputProcess(const ComputeGraphPtr &graph, const NodePtr node);

Status MutableInputProcess(const ComputeGraphPtr &graph, const NodePtr node);

Status P2pmemInputProcess(const ComputeGraphPtr &graph, const NodePtr node);

bool IsDataNode(const std::string& node_type);

std::unordered_map<std::string, uint32_t> node_num_map_;


+ 3
- 0
ge/graph/preprocess/graph_preprocess.cc View File

@@ -48,6 +48,7 @@
#include "graph/passes/enter_pass.h"
#include "graph/passes/for_pass.h"
#include "graph/passes/guarantee_const_pass.h"
#include "graph/passes/hccl_memcpy_pass.h"
#include "graph/passes/hccl_group_pass.h"
#include "graph/passes/identity_pass.h"
#include "graph/passes/infershape_pass.h"
@@ -1894,6 +1895,8 @@ Status GraphPrepare::PrepareOptimize() {
PassManager graph_pass;
try {
(void)graph_pass.AddPass("PrepareOptimize::PrunePass", new PrunePass);
 // cannot be moved to optimize1/2 directly: doing so may insert more Identity nodes and cause CI failures
(void)graph_pass.AddPass("PrepareOptimize::HcclMemcpyPass", new HcclMemcpyPass);
} catch (std::bad_alloc &e) {
GELOGE(INTERNAL_ERROR, "Add pass failed, bad memory allocation occurs.");
return INTERNAL_ERROR;


+ 26
- 0
ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc View File

@@ -64,6 +64,9 @@ Status AicpuExtInfoHandler::Parse(const std::string &ext_info) {
case aicpu::FWKAdapter::FWK_ADPT_EXT_SESSION_INFO:
GE_CHK_STATUS_RET(ParseExtSessionInfo(aicpu_ext_info), "Parse ext session info failed.");
break;
case aicpu::FWKAdapter::FWK_ADPT_EXT_BITMAP:
GE_CHK_STATUS_RET(ParseExtBitMap(aicpu_ext_info), "Parse ext bit map failed.");
break;
default:
GELOGD("Node[%s] ignore infoType=%d, infoLen=%u.",
node_name_.c_str(), aicpu_ext_info->infoType, aicpu_ext_info->infoLen);
@@ -140,6 +143,29 @@ Status AicpuExtInfoHandler::ParseExtSessionInfo(AicpuExtInfo *aicpu_ext_info) {
return SUCCESS;
}

Status AicpuExtInfoHandler::ParseExtBitMap(AicpuExtInfo *aicpu_ext_info) {
GE_CHK_BOOL_RET_STATUS(aicpu_ext_info->infoLen == sizeof(uint64_t), PARAM_INVALID,
"Node[%s] parse bit_map info failed as infoLen must be %zu but %u.",
node_name_.c_str(), sizeof(uint64_t), aicpu_ext_info->infoLen);

bit_map_ = reinterpret_cast<uint64_t *>(aicpu_ext_info->infoMsg);
GELOGI("Node[%s] bit_map info success infoLen=%u.", node_name_.c_str(), aicpu_ext_info->infoLen);
return SUCCESS;
}

Status AicpuExtInfoHandler::UpdateExecuteMode(bool flag) {
if (bit_map_ == nullptr) {
GELOGD("There is no bit_map in ext_info, no need update.");
return SUCCESS;
}
if (flag) {
*(bit_map_) |= 1;
} else {
*(bit_map_) &= ~1;
}
return SUCCESS;
}

Status AicpuExtInfoHandler::UpdateSessionInfo(uint64_t session_id, uint64_t kernel_id, bool sess_flag) {
if (session_info_ == nullptr) {
GELOGD("There is no session info in ext_info, no need update.");


+ 4
- 0
ge/hybrid/node_executor/aicpu/aicpu_ext_info.h View File

@@ -57,6 +57,8 @@ class AicpuExtInfoHandler {

Status UpdateSessionInfoSessionId(uint64_t session_id);

Status UpdateExecuteMode(bool flag);

Status GetOutputShapeAndType(uint32_t output_index, GeShape &shape, DataType &data_type);

private:
@@ -65,6 +67,7 @@ class AicpuExtInfoHandler {
Status ParseExtInputShape(AicpuExtInfo *aicpu_ext_info);
Status ParseExtOutputShape(AicpuExtInfo *aicpu_ext_info);
Status ParseExtSessionInfo(AicpuExtInfo *aicpu_ext_info);
Status ParseExtBitMap(AicpuExtInfo *aicpu_ext_info);

static Status UpdateShapeAndType(const GeShape &shape,
DataType data_type,
@@ -80,6 +83,7 @@ class AicpuExtInfoHandler {
const uint32_t output_num_;
UnknowShapeOpType unknown_type_;
AicpuSessionInfo *session_info_ = nullptr;
uint64_t *bit_map_ = nullptr;

std::unique_ptr<uint8_t[]> ext_info_;
size_t ext_info_len_ = 0;


+ 1
- 0
ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc View File

@@ -136,6 +136,7 @@ Status AicpuNodeTaskBase::UpdateExtInfo() {
return SUCCESS;
}

GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateExecuteMode(false), "UpdateExecuteMode failed.");
for (auto i = 0; i < node_item_->num_inputs; ++i) {
auto input_desc = node_item_->MutableInputDesc(i);
GE_CHECK_NOTNULL(input_desc);


+ 4
- 2
ge/single_op/task/op_task.cc View File

@@ -373,6 +373,7 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint

GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(ULLONG_MAX, kernel_id, false),
"UpdateSessionInfo failed.");
GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateExecuteMode(true), "UpdateExecuteMode failed.");

GE_CHK_RT_RET(rtMalloc(&ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), RT_MEMORY_HBM));
GE_CHK_RT_RET(rtMemcpy(ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(),
@@ -404,13 +405,14 @@ Status AiCpuBaseTask::UpdateExtInfo(const std::vector<GeTensorDesc> &input_desc,
std::vector<GeTensorDesc> &output_desc,
rtStream_t stream) {
GELOGI("Update ext info begin, unknown_type=%d.", unknown_type_);
GE_CHECK_NOTNULL(aicpu_ext_handle_);
GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateExecuteMode(false), "UpdateExecuteMode failed.");

if (num_inputs_ == 0 && num_outputs_ == 0) {
GELOGI("No input and output, no need update ext info.");
return SUCCESS;
}

GE_CHECK_NOTNULL(aicpu_ext_handle_);

size_t non_const_index = 0;
for (size_t input_index = 0; input_index < num_inputs_; input_index++) {
if (input_index < input_is_const_.size() && input_is_const_[input_index]) {


+ 1
- 0
tests/ut/ge/CMakeLists.txt View File

@@ -243,6 +243,7 @@ set(COMMON_SRC_FILES
"${GE_CODE_DIR}/ge/graph/passes/cast_remove_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/transpose_transdata_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/hccl_memcpy_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/hccl_continuous_memcpy_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/flow_ctrl_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/global_step_insert_pass.cc"
"${GE_CODE_DIR}/ge/graph/passes/link_gen_mask_nodes_pass.cc"


+ 8
- 0
tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc View File

@@ -137,4 +137,12 @@ TEST_F(UtestKernelExTaskInfo, kernel_ex_task_info_calculate_args) {
EXPECT_EQ(kernel_ex_task_info.CalculateArgs(task_def, &model), FAILED);
}

TEST_F(UtestKernelExTaskInfo, kernel_ex_task_ext_info) {
const string ext_info = {1, 1, 1, 1, 0, 0, 0, 0};
const OpDescPtr op_desc = CreateOpDesc("FrameworkOp", "FrameworkOp");

KernelExTaskInfo kernel_ex_task_info;
EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(ext_info, op_desc), SUCCESS);
}

} // namespace ge

+ 3
- 0
tests/ut/ge/graph/load/kernel_task_info_unittest.cc View File

@@ -413,6 +413,9 @@ TEST_F(UtestKernelTaskInfo, init_kernel_taskInfo_with_aicpu_kernel_type_fail) {
// rtMemcpy -> RT_ERROR_INVALID_VALUE
EXPECT_EQ(kernel_task_info.Init(task_def, &model), SUCCESS);

const string ext_info = {1, 1, 1, 1, 0, 0, 0, 0};
EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(ext_info), SUCCESS);

EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS);
EXPECT_EQ(kernel_task_info.Release(), SUCCESS);



+ 15
- 15
third_party/fwkacllib/inc/toolchain/slog.h View File

@@ -120,15 +120,15 @@ typedef struct tagKV {
} KeyValue;

typedef enum {
APPLICATION = 0,
SYSTEM
APPLICATION = 0,
SYSTEM
} ProcessType;

typedef struct {
ProcessType type;
unsigned int pid;
unsigned int deviceId;
char reserved[RESERVERD_LENGTH];
ProcessType type;
unsigned int pid;
unsigned int deviceId;
char reserved[RESERVERD_LENGTH];
} LogAttr;

/**
@@ -381,13 +381,13 @@ DLL_EXPORT void DlogFlush(void);
* @ingroup slog
* @brief Internal log interface, other modules are not allowed to call this interface
*/
void DlogErrorInner(int moduleId, const char *fmt, ...);
void DlogWarnInner(int moduleId, const char *fmt, ...);
void DlogInfoInner(int moduleId, const char *fmt, ...);
void DlogDebugInner(int moduleId, const char *fmt, ...);
void DlogEventInner(int moduleId, const char *fmt, ...);
void DlogInner(int moduleId, int level, const char *fmt, ...);
void DlogWithKVInner(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...);
void DlogErrorInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
void DlogWarnInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
void DlogInfoInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
void DlogDebugInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
void DlogEventInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
void DlogInner(int moduleId, int level, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
void DlogWithKVInner(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...) __attribute__((format(printf, 5, 6)));

#ifdef __cplusplus
#ifndef LOG_CPP
@@ -500,8 +500,8 @@ DLL_EXPORT void DlogFlushForC(void);
* @ingroup slog
* @brief Internal log interface, other modules are not allowed to call this interface
*/
void DlogInnerForC(int moduleId, int level, const char *fmt, ...);
void DlogWithKVInnerForC(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...);
void DlogInnerForC(int moduleId, int level, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
void DlogWithKVInnerForC(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...) __attribute__((format(printf, 5, 6)));

#ifdef __cplusplus
}


Loading…
Cancel
Save