From 83e15327f6aab0b8b0e3d3d7278b22f613f535a5 Mon Sep 17 00:00:00 2001 From: lianghao Date: Mon, 25 Jan 2021 15:22:28 +0800 Subject: [PATCH 01/13] memory assign --- ge/graph/build/memory/graph_mem_assigner.cc | 21 ++++++++++++++++++++ ge/graph/build/memory/graph_mem_assigner.h | 4 ++++ ge/graph/build/memory/memory_assigner.cc | 5 +++++ ge/graph/build/memory/var_mem_assign_util.cc | 5 +---- 4 files changed, 31 insertions(+), 4 deletions(-) diff --git a/ge/graph/build/memory/graph_mem_assigner.cc b/ge/graph/build/memory/graph_mem_assigner.cc index a868fdcd..8c5d8940 100755 --- a/ge/graph/build/memory/graph_mem_assigner.cc +++ b/ge/graph/build/memory/graph_mem_assigner.cc @@ -88,6 +88,14 @@ Status VariableMemoryAssigner::AssignVarAttr2Nodes() { return ge::SUCCESS; } +Status VariableMemoryAssigner::AssignMemory2HasRefAttrNode() { + Status result = ge::VarMemAssignUtil::AssignMemory2HasRefAttrNode(compute_graph_); + if (result != ge::SUCCESS) { + return result; + } + return ge::SUCCESS; +} + Status GraphMemoryAssigner::AssignMemory() { ge::HybridMemAssignerPtr mem_assigner(new(std::nothrow) HybridMemAssigner(compute_graph_)); if (mem_assigner->Assign() != ge::SUCCESS) { @@ -135,6 +143,19 @@ ge::Status GraphMemoryAssigner::AssignVarAttr2Nodes() { return ge::SUCCESS; } +ge::Status GraphMemoryAssigner::AssignMemory2HasRefAttrNode() { + auto variable_assigner = + std::unique_ptr(new(std::nothrow) ge::VariableMemoryAssigner(compute_graph_)); + if (variable_assigner == nullptr) { + GELOGE(ge::FAILED, "Alloc VariableMemoryAssigner failed."); + return ge::FAILED; + } + if (variable_assigner->AssignMemory2HasRefAttrNode() != ge::SUCCESS) { + return ge::FAILED; + } + return ge::SUCCESS; +} + ge::Status CalculateTensorRealSizeAndOutSize(const ge::ConstGeTensorDescPtr &output_desc, int64_t dim_index, int64_t &output_mem_size, int64_t &batch_dim_num, int64_t &out_size) { diff --git a/ge/graph/build/memory/graph_mem_assigner.h b/ge/graph/build/memory/graph_mem_assigner.h index 
a380e594..be6c47b0 100755 --- a/ge/graph/build/memory/graph_mem_assigner.h +++ b/ge/graph/build/memory/graph_mem_assigner.h @@ -63,6 +63,8 @@ class VariableMemoryAssigner { /// ge::Status AssignVarAttr2Nodes(); + ge::Status AssignMemory2HasRefAttrNode(); + private: ge::ComputeGraphPtr compute_graph_; }; @@ -99,6 +101,8 @@ class GraphMemoryAssigner { /// ge::Status AssignVarAttr2Nodes(); + ge::Status AssignMemory2HasRefAttrNode(); + ge::Status ReAssignMemory(bool is_loop_graph, map &mem_type_to_offset); ge::Status AssignZeroCopyMemory(map &mem_offset, size_t &zero_mem_copy_size); diff --git a/ge/graph/build/memory/memory_assigner.cc b/ge/graph/build/memory/memory_assigner.cc index 055103a9..0f58a040 100755 --- a/ge/graph/build/memory/memory_assigner.cc +++ b/ge/graph/build/memory/memory_assigner.cc @@ -40,6 +40,11 @@ Status MemoryAssigner::AssignMemory(bool is_loop_graph, map &me return ge::FAILED; } + if (graph_mem_assigner.AssignMemory2HasRefAttrNode() != ge::SUCCESS) { + GELOGE(ge::FAILED, "Assign memory to node which has ref attr failed!"); + return ge::FAILED; + } + // Assign memory for reference if (graph_mem_assigner.AssignReferenceMemory() != ge::SUCCESS) { GELOGE(ge::FAILED, "Assign reference memory failed!"); diff --git a/ge/graph/build/memory/var_mem_assign_util.cc b/ge/graph/build/memory/var_mem_assign_util.cc index dfc633af..f910d2e2 100755 --- a/ge/graph/build/memory/var_mem_assign_util.cc +++ b/ge/graph/build/memory/var_mem_assign_util.cc @@ -33,10 +33,7 @@ using std::vector; namespace ge { Status VarMemAssignUtil::AssignVarMemory(ge::ComputeGraphPtr &compute_graph) { - GE_CHK_STATUS_RET(AssignMemory2VariableNode(compute_graph)); - GE_CHK_STATUS_RET(AssignMemory2HasRefAttrNode(compute_graph)); - - return SUCCESS; + return AssignMemory2VariableNode(compute_graph); } Status VarMemAssignUtil::AssignConstantOpMemory(ge::ComputeGraphPtr &compute_graph) { From 703ac4ca9cbe6bb83ffe36720b09504a0194d4b3 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Mon, 25 
Jan 2021 16:36:37 +0800 Subject: [PATCH 02/13] Add __attribute__((format(printf, 2, 3))) --- third_party/fwkacllib/inc/toolchain/slog.h | 30 +++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/third_party/fwkacllib/inc/toolchain/slog.h b/third_party/fwkacllib/inc/toolchain/slog.h index 7c4f7be2..ba286d02 100644 --- a/third_party/fwkacllib/inc/toolchain/slog.h +++ b/third_party/fwkacllib/inc/toolchain/slog.h @@ -120,15 +120,15 @@ typedef struct tagKV { } KeyValue; typedef enum { - APPLICATION = 0, - SYSTEM + APPLICATION = 0, + SYSTEM } ProcessType; typedef struct { - ProcessType type; - unsigned int pid; - unsigned int deviceId; - char reserved[RESERVERD_LENGTH]; + ProcessType type; + unsigned int pid; + unsigned int deviceId; + char reserved[RESERVERD_LENGTH]; } LogAttr; /** @@ -381,13 +381,13 @@ DLL_EXPORT void DlogFlush(void); * @ingroup slog * @brief Internal log interface, other modules are not allowed to call this interface */ -void DlogErrorInner(int moduleId, const char *fmt, ...); -void DlogWarnInner(int moduleId, const char *fmt, ...); -void DlogInfoInner(int moduleId, const char *fmt, ...); -void DlogDebugInner(int moduleId, const char *fmt, ...); -void DlogEventInner(int moduleId, const char *fmt, ...); -void DlogInner(int moduleId, int level, const char *fmt, ...); -void DlogWithKVInner(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...); +void DlogErrorInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3))); +void DlogWarnInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3))); +void DlogInfoInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3))); +void DlogDebugInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3))); +void DlogEventInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3))); +void DlogInner(int moduleId, int level, const char *fmt, ...) 
__attribute__((format(printf, 3, 4))); +void DlogWithKVInner(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...) __attribute__((format(printf, 5, 6))); #ifdef __cplusplus #ifndef LOG_CPP @@ -500,8 +500,8 @@ DLL_EXPORT void DlogFlushForC(void); * @ingroup slog * @brief Internal log interface, other modules are not allowed to call this interface */ -void DlogInnerForC(int moduleId, int level, const char *fmt, ...); -void DlogWithKVInnerForC(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...); +void DlogInnerForC(int moduleId, int level, const char *fmt, ...) __attribute__((format(printf, 3, 4))); +void DlogWithKVInnerForC(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...) __attribute__((format(printf, 5, 6))); #ifdef __cplusplus } From 975dddc0d58156149a9f528542437f2b1ae75e1b Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 25 Jan 2021 21:45:12 +0800 Subject: [PATCH 03/13] For aicpu all_shape compile. --- .../task_info/kernel_ex_task_info.cc | 46 +++++++++++++---- .../task_info/kernel_ex_task_info.h | 1 + .../task_info/kernel_task_info.cc | 51 ++++++++----------- .../node_executor/aicpu/aicpu_ext_info.cc | 26 ++++++++++ .../node_executor/aicpu/aicpu_ext_info.h | 4 ++ .../aicpu/aicpu_node_executor.cc | 2 + ge/single_op/task/op_task.cc | 6 ++- .../load/kernel_ex_task_info_unittest.cc | 8 +++ .../graph/load/kernel_task_info_unittest.cc | 6 +++ 9 files changed, 109 insertions(+), 41 deletions(-) diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc index 6da1bf63..be91dd15 100644 --- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc @@ -26,8 +26,42 @@ #include "graph/attr_value.h" #include "graph/load/model_manager/davinci_model.h" #include "graph/load/model_manager/model_manager.h" +#include 
"hybrid/node_executor/aicpu/aicpu_ext_info.h" +#include "framework/common/debug/log.h" namespace ge { +Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDescPtr &op_desc) { + if (ext_info.empty()) { + return SUCCESS; + } + int32_t unknown_shape_type_val = 0; + (void) AttrUtils::GetInt(op_desc, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, unknown_shape_type_val); + UnknowShapeOpType unknown_type = static_cast(unknown_shape_type_val); + uint32_t num_inputs = op_desc->GetInputsSize(); + uint32_t num_outputs = op_desc->GetOutputsSize(); + std::unique_ptr ext_handle( + new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc->GetName(), + num_inputs, + num_outputs, + unknown_type)); + GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!"); + GE_CHK_STATUS_RET(ext_handle->Parse(ext_info) + "Parse kernel ext info failed, kernel_ext_info_size=%zu.", ext_info.size()); + GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed."); + GELOGD("Update aicpu_task ext_info bit_map execute mode to 1."); + + auto rt_ret = rtMalloc(&ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM); + GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, + GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); + return RT_ERROR_TO_GE_STATUS(rt_ret);) + rt_ret = rtMemcpy(ext_info_addr_, ext_handle_->GetExtInfoLen(), ext_handle_->GetExtInfo(), + ext_handle->GetExtInfoLen(), RT_MEMCPY_HOST_TO_DEVICE); + GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, + GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); + return RT_ERROR_TO_GE_STATUS(rt_ret);) + return SUCCESS; +} + Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) { GELOGI("KernelExTaskInfo Init Start."); GE_CHECK_NOTNULL(davinci_model); @@ -63,16 +97,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin } const auto &ext_info = 
kernel_ex_def.kernel_ext_info(); - if (!ext_info.empty()) { - auto rt_ret = rtMalloc(&ext_info_addr_, ext_info.size(), RT_MEMORY_HBM); - GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, - GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); - return RT_ERROR_TO_GE_STATUS(rt_ret);) - rt_ret = rtMemcpy(ext_info_addr_, ext_info.size(), ext_info.c_str(), ext_info.size(), RT_MEMCPY_HOST_TO_DEVICE); - GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, - GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); - return RT_ERROR_TO_GE_STATUS(rt_ret);) - } + GE_CHK_STATUS_RET(InitTaskExtInfo(ext_info, op_desc), + "Init aicpu tf_task ext info failed, ext_info size=%zu", ext_info.size()); GELOGI("Node[%s] type[%s] kernel_ext_info size=%zu, ext_info_addr_=%p", op_desc->GetName().c_str(), op_desc->GetType().c_str(), ext_info.size(), ext_info_addr_); diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h index 265316ce..71153c31 100644 --- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h +++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h @@ -62,6 +62,7 @@ class KernelExTaskInfo : public TaskInfo { void SetIoAddrs(const OpDescPtr &op_desc); void InitDumpTask(void *addr, const OpDescPtr &op_desc); + Status InitTaskExtInfo(const std::string &ext_info, const OpDescPtr &op_desc); uint32_t task_id_; uint32_t stream_id_; diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc index 27fe8eb0..9582b9ab 100755 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc @@ -964,39 +964,32 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) { return SUCCESS; } - std::unique_ptr copy_ext_info; - copy_ext_info.reset(new(std::nothrow)uint8_t[ext_info.size()]); - 
GE_CHECK_NOTNULL(copy_ext_info); - auto sec_ret = memcpy_s(copy_ext_info.get(), ext_info.size(), ext_info.c_str(), ext_info.size()); - if (sec_ret != EOK) { - GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret); - return FAILED; - } - - auto ext_info_data = copy_ext_info.get(); - size_t offset = 0; - while (offset + sizeof(aicpu::FWKAdapter::ExtInfo) <= ext_info.size()) { - auto aicpu_ext_info = reinterpret_cast(ext_info_data + offset); - GELOGD("Ext infoType=%d, infoLen=%u.", aicpu_ext_info->infoType, aicpu_ext_info->infoLen); - if (aicpu_ext_info->infoType == aicpu::FWKAdapter::FWK_ADPT_EXT_SESSION_INFO) { - GE_CHK_BOOL_RET_STATUS(aicpu_ext_info->infoLen == sizeof(SessionInfo), PARAM_INVALID, - "Parse ext session info failed as infoLen must be %zu but %u.", - sizeof(SessionInfo), aicpu_ext_info->infoLen); - SessionInfo *session_info = reinterpret_cast(aicpu_ext_info->infoMsg); - session_info->sessionId = davinci_model_->GetSessionId(); - session_info->sessFlag = true; - GELOGD("Update aicpu_task ext_info session_info session_id is %lu", session_info->sessionId); - } - offset += sizeof(aicpu::FWKAdapter::ExtInfo); - offset += aicpu_ext_info->infoLen; - } - - auto rt_ret = rtMalloc(&aicpu_ext_info_addr_, ext_info.size(), RT_MEMORY_HBM); + int32_t unknown_shape_type_val = 0; + (void) AttrUtils::GetInt(op_desc, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, unknown_shape_type_val); + UnknowShapeOpType unknown_type = static_cast(unknown_shape_type_val); + uint32_t num_inputs = op_desc->GetInputsSize(); + uint32_t num_outputs = op_desc->GetOutputsSize(); + std::unique_ptr ext_handle( + new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc->GetName(), + num_inputs, + num_outputs, + unknown_type)); + GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!"); + GE_CHK_STATUS_RET(ext_handle->Parse(ext_info) + "Parse kernel ext info failed, kernel_ext_info_size=%zu.", ext_info.size()); + 
GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(davinci_model_->GetSessionId()), + "Update session info session id sfailed."); + GELOGD("Update aicpu_task ext_info session_info session_id is %lu", davinci_model_->GetSessionId()); + GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed."); + GELOGD("Update aicpu_task ext_info bit_map execute mode to 1."); + + auto rt_ret = rtMalloc(&aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM); if (rt_ret != RT_ERROR_NONE) { GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); return RT_ERROR_TO_GE_STATUS(rt_ret); } - rt_ret = rtMemcpy(aicpu_ext_info_addr_, ext_info.size(), ext_info_data, ext_info.size(), RT_MEMCPY_HOST_TO_DEVICE); + rt_ret = rtMemcpy(aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle_->GetExtInfo(), + ext_handle->GetExtInfoLen(), RT_MEMCPY_HOST_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); return RT_ERROR_TO_GE_STATUS(rt_ret); diff --git a/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc b/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc index e9c7c604..b178b906 100644 --- a/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc +++ b/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc @@ -64,6 +64,9 @@ Status AicpuExtInfoHandler::Parse(const std::string &ext_info) { case aicpu::FWKAdapter::FWK_ADPT_EXT_SESSION_INFO: GE_CHK_STATUS_RET(ParseExtSessionInfo(aicpu_ext_info), "Parse ext session info failed."); break; + case aicpu::FWKAdapter::FWK_ADPT_EXT_BITMAP: + GE_CHK_STATUS_RET(ParseExtBitMap(aicpu_ext_info), "Parse ext bit map failed."); + break; default: GELOGD("Node[%s] ignore infoType=%d, infoLen=%u.", node_name_.c_str(), aicpu_ext_info->infoType, aicpu_ext_info->infoLen); @@ -140,6 +143,29 @@ Status AicpuExtInfoHandler::ParseExtSessionInfo(AicpuExtInfo *aicpu_ext_info) { return SUCCESS; } +Status 
AicpuExtInfoHandler::ParseExtBitMap(AicpuExtInfo *aicpu_ext_info) { + GE_CHK_BOOL_RET_STATUS(aicpu_ext_info->infoLen == sizeof(uint64_t), PARAM_INVALID, + "Node[%s] parse bit_map info failed as infoLen must be %zu but %u.", + node_name_.c_str(), sizeof(uint64_t), aicpu_ext_info->infoLen); + + bit_map_ = reinterpret_cast(aicpu_ext_info->infoMsg); + GELOGI("Node[%s] bit_map info success infoLen=%u.", node_name_.c_str(), aicpu_ext_info->infoLen); + return SUCCESS; +} + +Status AicpuExtInfoHandler::UpdateExecuteMode(bool flag) { + if (bit_map_ == nullptr) { + GELOGD("There is no bit_map in ext_info, no need update."); + return SUCCESS; + } + if (flag) { + *(bit_map_) |= 1; + } else { + *(bit_map_) &= ~1; + } + return SUCCESS; +} + Status AicpuExtInfoHandler::UpdateSessionInfo(uint64_t session_id, uint64_t kernel_id, bool sess_flag) { if (session_info_ == nullptr) { GELOGD("There is no session info in ext_info, no need update."); diff --git a/ge/hybrid/node_executor/aicpu/aicpu_ext_info.h b/ge/hybrid/node_executor/aicpu/aicpu_ext_info.h index 2defba8f..e5b94452 100644 --- a/ge/hybrid/node_executor/aicpu/aicpu_ext_info.h +++ b/ge/hybrid/node_executor/aicpu/aicpu_ext_info.h @@ -57,6 +57,8 @@ class AicpuExtInfoHandler { Status UpdateSessionInfoSessionId(uint64_t session_id); + Status UpdateExecuteMode(bool flag); + Status GetOutputShapeAndType(uint32_t output_index, GeShape &shape, DataType &data_type); private: @@ -65,6 +67,7 @@ class AicpuExtInfoHandler { Status ParseExtInputShape(AicpuExtInfo *aicpu_ext_info); Status ParseExtOutputShape(AicpuExtInfo *aicpu_ext_info); Status ParseExtSessionInfo(AicpuExtInfo *aicpu_ext_info); + Status ParseExtBitMap(AicpuExtInfo *aicpu_ext_info); static Status UpdateShapeAndType(const GeShape &shape, DataType data_type, @@ -80,6 +83,7 @@ class AicpuExtInfoHandler { const uint32_t output_num_; UnknowShapeOpType unknown_type_; AicpuSessionInfo *session_info_ = nullptr; + uint64_t *bit_map_ = nullptr; std::unique_ptr ext_info_; size_t 
ext_info_len_ = 0; diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc index 1c160eea..e9e99629 100755 --- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc +++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc @@ -60,6 +60,7 @@ Status AicpuNodeTaskBase::InitExtInfo(const std::string &kernel_ext_info, int64_ GELOGD("To update aicpu_task ext_info session_info session_id to %lu", session_id); GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateSessionInfoSessionId(session_id), "UpdateSessionInfoSessionId failed."); + GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateExecuteMode(true), "UpdateExecuteMode failed."); // copy task args buf GE_CHK_STATUS_RET(AllocTensorBuffer(aicpu_ext_handle_.GetExtInfoLen(), ext_info_addr_dev_), @@ -136,6 +137,7 @@ Status AicpuNodeTaskBase::UpdateExtInfo() { return SUCCESS; } + GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(false), "UpdateExecuteMode failed."); for (auto i = 0; i < node_item_->num_inputs; ++i) { auto input_desc = node_item_->MutableInputDesc(i); GE_CHECK_NOTNULL(input_desc); diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index 3d001d8b..8bef8c50 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -373,6 +373,7 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(ULLONG_MAX, kernel_id, false), "UpdateSessionInfo failed."); + GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true, "UpdateExecuteMode failed."); GE_CHK_RT_RET(rtMalloc(&ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), RT_MEMORY_HBM)); GE_CHK_RT_RET(rtMemcpy(ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), @@ -404,13 +405,14 @@ Status AiCpuBaseTask::UpdateExtInfo(const std::vector &input_desc, std::vector &output_desc, rtStream_t stream) { GELOGI("Update ext info begin, unknown_type=%d.", unknown_type_); + GE_CHECK_NOTNULL(aicpu_ext_handle_); + 
GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(false), "UpdateExecuteMode failed."); + if (num_inputs_ == 0 && num_outputs_ == 0) { GELOGI("No input and output, no need update ext info."); return SUCCESS; } - GE_CHECK_NOTNULL(aicpu_ext_handle_); - size_t non_const_index = 0; for (size_t input_index = 0; input_index < num_inputs_; input_index++) { if (input_index < input_is_const_.size() && input_is_const_[input_index]) { diff --git a/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc b/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc index 53436820..7bc3faca 100644 --- a/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc +++ b/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc @@ -137,4 +137,12 @@ TEST_F(UtestKernelExTaskInfo, kernel_ex_task_info_calculate_args) { EXPECT_EQ(kernel_ex_task_info.CalculateArgs(task_def, &model), FAILED); } +TEST_F(UtestKernelExTaskInfo, kernel_ex_task_ext_info) { + const char ext_info[4] = {0, 0, 0, 4}; + const OpDescPtr op_desc = CreateOpDesc("FrameworkOp", "FrameworkOp"); + + KernelExTaskInfo kernel_ex_task_info; + EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(ext_info, op_desc), SUCCESS); +} + } // namespace ge diff --git a/tests/ut/ge/graph/load/kernel_task_info_unittest.cc b/tests/ut/ge/graph/load/kernel_task_info_unittest.cc index a3a27a7b..c253666f 100644 --- a/tests/ut/ge/graph/load/kernel_task_info_unittest.cc +++ b/tests/ut/ge/graph/load/kernel_task_info_unittest.cc @@ -1195,4 +1195,10 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_super_kernel_info) { EXPECT_EQ(kernel_task_info.SKTFinalize(), SUCCESS); } +TEST_F(UtestKernelTaskInfo, kernel_ask_ext_info) { + const char ext_info[4] = {0, 0, 0, 4}; + KernelTaskInfo kernel_task_info; + EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(ext_info), SUCCESS); +} + } // namespace ge From f640f729c978141e27195e93f0ba0d069d1881cc Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 25 Jan 2021 21:46:35 +0800 Subject: [PATCH 04/13] For aicpu all_shape compile. 
--- ge/generator/ge_generator.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index fe7ea3bf..b050ab93 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -721,8 +721,12 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector &in GeModelPtr &ge_model = name_to_ge_model.begin()->second; GELOGD("The opType in op_desc_tmp is [%s]", op_desc_tmp->GetType().c_str()); + bool all_shape = false; + (void)AttrUtils::GetBool(op_desc, "_AllShape", all_shape); bool dynamic_flag = false; - if (CheckShapeReset(op_desc, dynamic_flag) == SUCCESS && dynamic_flag) { + CheckShapeReset(op_desc, dynamic_flag); + if (all_shape && dynamic_flag) { + GELOGD("Get aicpu all_shape kernel!"); vector inputs_dynamic; vector outputs_dynamic; GE_CHK_STATUS_RET_NOLOG(ResetTensorVecShape(inputs, inputs_dynamic)); From d383e91c52ed9a681777d965cde4ba27e6f40b64 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 25 Jan 2021 22:00:59 +0800 Subject: [PATCH 05/13] For aicpu all_shape compile. 
--- ge/generator/ge_generator.cc | 5 +++-- .../task_info/kernel_ex_task_info.cc | 4 ++-- .../task_info/kernel_task_info.cc | 18 ++++++++++-------- .../node_executor/aicpu/aicpu_node_executor.cc | 3 +-- ge/single_op/task/op_task.cc | 4 ++-- 5 files changed, 18 insertions(+), 16 deletions(-) diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index b050ab93..52dfa65d 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -47,6 +47,7 @@ const char *const kEngineNameDefault = "default"; const char *const kVectorEngine = "VectorEngine"; const char *const kAIcoreEngine = "AIcoreEngine"; const char *const kFileNameSuffix = "online"; +const char *const kAicpuAllshape = "_AllShape"; const size_t kDynamicDimSize = 1; const int64_t kDynamicDimValue = -2; @@ -722,10 +723,10 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector &in GELOGD("The opType in op_desc_tmp is [%s]", op_desc_tmp->GetType().c_str()); bool all_shape = false; - (void)AttrUtils::GetBool(op_desc, "_AllShape", all_shape); bool dynamic_flag = false; + (void)AttrUtils::GetBool(op_desc, kAicpuAllshape, all_shape); CheckShapeReset(op_desc, dynamic_flag); - if (all_shape && dynamic_flag) { + if (dynamic_flag && all_shape) { GELOGD("Get aicpu all_shape kernel!"); vector inputs_dynamic; vector outputs_dynamic; diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc index be91dd15..b9644946 100644 --- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc @@ -45,7 +45,7 @@ Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe num_outputs, unknown_type)); GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!"); - GE_CHK_STATUS_RET(ext_handle->Parse(ext_info) + GE_CHK_STATUS_RET(ext_handle->Parse(ext_info), "Parse kernel ext info failed, 
kernel_ext_info_size=%zu.", ext_info.size()); GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed."); GELOGD("Update aicpu_task ext_info bit_map execute mode to 1."); @@ -54,7 +54,7 @@ Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); return RT_ERROR_TO_GE_STATUS(rt_ret);) - rt_ret = rtMemcpy(ext_info_addr_, ext_handle_->GetExtInfoLen(), ext_handle_->GetExtInfo(), + rt_ret = rtMemcpy(ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle->GetExtInfo(), ext_handle->GetExtInfoLen(), RT_MEMCPY_HOST_TO_DEVICE); GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc index 9582b9ab..2a3ddaf8 100755 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc @@ -32,6 +32,8 @@ #include "super_kernel/super_kernel.h" #include "super_kernel/super_kernel_factory.h" #include "cce/aicpu_engine_struct.h" +#include "hybrid/node_executor/aicpu/aicpu_ext_info.h" +#include "framework/common/debug/log.h" namespace { const uint8_t kL2LoadToDdr = 1; @@ -965,20 +967,20 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) { } int32_t unknown_shape_type_val = 0; - (void) AttrUtils::GetInt(op_desc, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, unknown_shape_type_val); + (void) AttrUtils::GetInt(op_desc_, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, unknown_shape_type_val); UnknowShapeOpType unknown_type = static_cast(unknown_shape_type_val); - uint32_t num_inputs = op_desc->GetInputsSize(); - uint32_t num_outputs = op_desc->GetOutputsSize(); + uint32_t num_inputs = op_desc_->GetInputsSize(); + uint32_t num_outputs = 
op_desc_->GetOutputsSize(); std::unique_ptr ext_handle( - new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc->GetName(), + new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc_->GetName(), num_inputs, num_outputs, unknown_type)); GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!"); - GE_CHK_STATUS_RET(ext_handle->Parse(ext_info) + GE_CHK_STATUS_RET(ext_handle->Parse(ext_info), "Parse kernel ext info failed, kernel_ext_info_size=%zu.", ext_info.size()); - GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(davinci_model_->GetSessionId()), - "Update session info session id sfailed."); + GE_CHK_STATUS_RET(ext_handle_->UpdateSessionInfo(davinci_model_->GetSessionId()), + "Update session info session id failed."); GELOGD("Update aicpu_task ext_info session_info session_id is %lu", davinci_model_->GetSessionId()); GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed."); GELOGD("Update aicpu_task ext_info bit_map execute mode to 1."); @@ -988,7 +990,7 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) { GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); return RT_ERROR_TO_GE_STATUS(rt_ret); } - rt_ret = rtMemcpy(aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle_->GetExtInfo(), + rt_ret = rtMemcpy(aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle->GetExtInfo(), ext_handle->GetExtInfoLen(), RT_MEMCPY_HOST_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size()); diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc index e9e99629..ca9e908b 100755 --- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc +++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc @@ -60,7 +60,6 @@ Status AicpuNodeTaskBase::InitExtInfo(const std::string &kernel_ext_info, int64_ 
GELOGD("To update aicpu_task ext_info session_info session_id to %lu", session_id); GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateSessionInfoSessionId(session_id), "UpdateSessionInfoSessionId failed."); - GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateExecuteMode(true), "UpdateExecuteMode failed."); // copy task args buf GE_CHK_STATUS_RET(AllocTensorBuffer(aicpu_ext_handle_.GetExtInfoLen(), ext_info_addr_dev_), @@ -137,7 +136,7 @@ Status AicpuNodeTaskBase::UpdateExtInfo() { return SUCCESS; } - GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(false), "UpdateExecuteMode failed."); + GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateExecuteMode(false), "UpdateExecuteMode failed."); for (auto i = 0; i < node_item_->num_inputs; ++i) { auto input_desc = node_item_->MutableInputDesc(i); GE_CHECK_NOTNULL(input_desc); diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index 8bef8c50..28ddcbe2 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -373,7 +373,7 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(ULLONG_MAX, kernel_id, false), "UpdateSessionInfo failed."); - GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true, "UpdateExecuteMode failed."); + GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateExecuteMode(true, "UpdateExecuteMode failed."); GE_CHK_RT_RET(rtMalloc(&ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), RT_MEMORY_HBM)); GE_CHK_RT_RET(rtMemcpy(ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), @@ -406,7 +406,7 @@ Status AiCpuBaseTask::UpdateExtInfo(const std::vector &input_desc, rtStream_t stream) { GELOGI("Update ext info begin, unknown_type=%d.", unknown_type_); GE_CHECK_NOTNULL(aicpu_ext_handle_); - GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(false), "UpdateExecuteMode failed."); + GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateExecuteMode(false), "UpdateExecuteMode failed."); if (num_inputs_ == 0 && num_outputs_ == 0) { GELOGI("No 
input and output, no need update ext info."); From 3f69c0ca6515544dfa69fe6251f3c53a7ad8ce14 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 25 Jan 2021 22:02:49 +0800 Subject: [PATCH 06/13] For aicpu all_shape compile. --- ge/generator/ge_generator.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index 52dfa65d..6c0fb6e5 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -726,7 +726,7 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector &in bool dynamic_flag = false; (void)AttrUtils::GetBool(op_desc, kAicpuAllshape, all_shape); CheckShapeReset(op_desc, dynamic_flag); - if (dynamic_flag && all_shape) { + if (dynamic_flag || all_shape) { GELOGD("Get aicpu all_shape kernel!"); vector inputs_dynamic; vector outputs_dynamic; From 5b001c5684914fc1f4a1caee85cb192318c79ed0 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 25 Jan 2021 22:06:18 +0800 Subject: [PATCH 07/13] For aicpu all_shape compile. 
--- ge/graph/load/model_manager/task_info/kernel_task_info.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc index 2a3ddaf8..861ef484 100755 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc @@ -979,7 +979,7 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) { GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!"); GE_CHK_STATUS_RET(ext_handle->Parse(ext_info), "Parse kernel ext info failed, kernel_ext_info_size=%zu.", ext_info.size()); - GE_CHK_STATUS_RET(ext_handle_->UpdateSessionInfo(davinci_model_->GetSessionId()), + GE_CHK_STATUS_RET(ext_handle_->UpdateSessionInfoSessionId(davinci_model_->GetSessionId()), "Update session info session id failed."); GELOGD("Update aicpu_task ext_info session_info session_id is %lu", davinci_model_->GetSessionId()); GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed."); From df604f679b325f1bb8c63ef22e9c4b99e2cf46b1 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 25 Jan 2021 22:07:12 +0800 Subject: [PATCH 08/13] For aicpu all_shape compile. 
--- ge/graph/load/model_manager/task_info/kernel_task_info.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc index 861ef484..1d2a74cc 100755 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc @@ -979,7 +979,7 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) { GE_CHK_BOOL_RET_STATUS(ext_handle != nullptr, FAILED, "Malloc aicpu_ext_handle mem failed!"); GE_CHK_STATUS_RET(ext_handle->Parse(ext_info), "Parse kernel ext info failed, kernel_ext_info_size=%zu.", ext_info.size()); - GE_CHK_STATUS_RET(ext_handle_->UpdateSessionInfoSessionId(davinci_model_->GetSessionId()), + GE_CHK_STATUS_RET(ext_handle->UpdateSessionInfoSessionId(davinci_model_->GetSessionId()), "Update session info session id failed."); GELOGD("Update aicpu_task ext_info session_info session_id is %lu", davinci_model_->GetSessionId()); GE_CHK_STATUS_RET(ext_handle->UpdateExecuteMode(true), "UpdateExecuteMode failed."); From 323e0fcde7ba55d1f28e6b787160d6387ac852aa Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 25 Jan 2021 22:10:14 +0800 Subject: [PATCH 09/13] For aicpu all_shape compile. 
--- ge/single_op/task/op_task.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index 28ddcbe2..ff200806 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -373,7 +373,7 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(ULLONG_MAX, kernel_id, false), "UpdateSessionInfo failed."); - GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateExecuteMode(true, "UpdateExecuteMode failed."); + GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateExecuteMode(true), "UpdateExecuteMode failed."); GE_CHK_RT_RET(rtMalloc(&ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), RT_MEMORY_HBM)); GE_CHK_RT_RET(rtMemcpy(ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), From 0215e623ca4c21e492e16819151b791c5258ec8a Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Tue, 26 Jan 2021 09:13:51 +0800 Subject: [PATCH 10/13] InitInputDescInfo & InitOutputDescInfo & CheckHasHcomOp --- ge/graph/load/model_manager/davinci_model.cc | 125 +++++++++---------- ge/graph/load/model_manager/davinci_model.h | 6 +- 2 files changed, 62 insertions(+), 69 deletions(-) diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index be33588a..95fd8392 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -446,23 +446,20 @@ void DavinciModel::InitRuntimeParams() { runtime_param_.mem_size, runtime_param_.weight_size, runtime_param_.var_size); } -void DavinciModel::CheckHasHcomOp() { - Graph graph = ge_model_->GetGraph(); - auto compute_graph = GraphUtils::GetComputeGraph(graph); - if (compute_graph == nullptr) { - return; - } +void DavinciModel::CheckHasHcomOp(const ComputeGraphPtr &compute_graph) { + const set hcom_opp_types({ + HCOMBROADCAST, HCOMALLGATHER, HCOMALLREDUCE, HCOMSEND, HCOMRECEIVE, HCOMREDUCESCATTER, + 
HVDCALLBACKALLREDUCE, HVDCALLBACKALLGATHER, HVDCALLBACKBROADCAST, HVDWAIT, HCOMREDUCE + }); + for (const auto &node : compute_graph->GetAllNodes()) { OpDescPtr op_desc = node->GetOpDesc(); GE_IF_BOOL_EXEC(op_desc == nullptr, GELOGW("Node OpDesc is nullptr"); continue); - GE_IF_BOOL_EXEC(((op_desc->GetType() == HCOMBROADCAST) || (op_desc->GetType() == HCOMALLGATHER) || - (op_desc->GetType() == HCOMALLREDUCE) || (op_desc->GetType() == HCOMSEND) || - (op_desc->GetType() == HCOMRECEIVE) || (op_desc->GetType() == HCOMREDUCESCATTER) || - (op_desc->GetType() == HVDCALLBACKALLREDUCE) || (op_desc->GetType() == HVDCALLBACKALLGATHER) || - (op_desc->GetType() == HVDCALLBACKBROADCAST) || (op_desc->GetType() == HVDWAIT) || - (op_desc->GetType() == HCOMREDUCE)), - uint32_t stream_id = static_cast(op_desc->GetStreamId()); - (void)hcom_streams_.emplace(stream_id); GELOGD("hcom stream: %u.", stream_id); continue); + if (hcom_opp_types.count(op_desc->GetType()) > 0) { + uint32_t stream_id = static_cast(op_desc->GetStreamId()); + hcom_streams_.emplace(stream_id); + GELOGD("hcom stream: %u.", stream_id); + } } } @@ -642,7 +639,7 @@ Status DavinciModel::Init(void *dev_ptr, size_t mem_size, void *weight_ptr, size name_ = ge_model_->GetName(); (void)ge::AttrUtils::GetBool(ge_model_, ATTR_NAME_SWITCH_FOR_L1_FUSION, is_l1_fusion_enable_); GELOGD("The value of ge.l1Fusion in ge_model is %d.", is_l1_fusion_enable_); - CheckHasHcomOp(); + CheckHasHcomOp(compute_graph); vector huge_stream_list; (void)ge::AttrUtils::GetListInt(ge_model_, ATTR_MODEL_HUGE_STREAM_LIST, huge_stream_list); @@ -1028,7 +1025,7 @@ Status DavinciModel::GenInputOutputInfo(const map &data_by_ const vector &output_op_list) { GELOGD("Data node size: %zu, NetOutput node size: %zu", data_by_index.size(), output_op_list.size()); for (auto &item : data_by_index) { - auto output_addrs = ModelUtils::GetOutputDataAddrs(runtime_param_, item.second); + const auto output_addrs = ModelUtils::GetOutputDataAddrs(runtime_param_, 
item.second); GELOGD("Data node: %s, output addr size: %zu", item.second->GetName().c_str(), output_addrs.size()); input_addrs_list_.emplace_back(output_addrs); @@ -1036,14 +1033,18 @@ Status DavinciModel::GenInputOutputInfo(const map &data_by_ GE_CHK_STATUS_RET(InitAippType(item.first, item.second, data_by_index), "Init AIPP Type failed"); GE_CHK_STATUS_RET(InitOrigInputInfo(item.first, item.second), "Init Orig input failed"); GE_CHK_STATUS_RET(InitAippInputOutputDims(item.first, item.second), "Init AIPP dims failed"); + GE_CHK_STATUS_RET(InitInputDescInfo(item.second), "Init input desc info failed"); if (item.second->GetType() == AIPP_DATA_TYPE) { GELOGI("This is dynamic aipp model, Node: %s", item.second->GetName().c_str()); is_dynamic_aipp_ = true; } } + vector out_node_name; + (void)AttrUtils::GetListStr(ge_model_, ATTR_MODEL_OUT_NODES_NAME, out_node_name); + GELOGD("Output node size: %zu, out nodes name: %zu", output_op_list.size(), out_node_name.size()); for (const auto &op_desc : output_op_list) { - auto input_addrs = ModelUtils::GetInputDataAddrs(runtime_param_, op_desc); + const auto input_addrs = ModelUtils::GetInputDataAddrs(runtime_param_, op_desc); GELOGD("NetOutput node: %s, input addr size: %zu", op_desc->GetName().c_str(), input_addrs.size()); output_addrs_list_.emplace_back(input_addrs); @@ -1061,10 +1062,11 @@ Status DavinciModel::GenInputOutputInfo(const map &data_by_ if (InitOutputTensorInfo(op_desc) != SUCCESS) { return INTERNAL_ERROR; } + + GE_CHK_STATUS_RET(InitOutputDescInfo(op_desc, out_node_name), "Init output desc info failed"); } - GE_CHK_STATUS_RET(InitInputDescInfo(data_by_index), "Init input desc info failed"); - return InitOutputDescInfo(output_op_list); + return SUCCESS; } bool DavinciModel::IsGetNextSinkDynamic(const OpDescPtr &op_desc) { @@ -1980,27 +1982,24 @@ void DavinciModel::CreateInputDimsInfo(const OpDescPtr &op_desc, Format format, } } -Status DavinciModel::InitInputDescInfo(const map &data_by_index) { - for (const auto 
&item : data_by_index) { - const auto op_desc = item.second; - GE_CHECK_NOTNULL(op_desc->GetInputDescPtr(0)); +Status DavinciModel::InitInputDescInfo(const OpDescPtr &op_desc) { + GE_CHECK_NOTNULL(op_desc->GetInputDescPtr(0)); - InputOutputDescInfo input; - ShapeDescription dims_info; - Format format = op_desc->GetInputDescPtr(0)->GetFormat(); - CreateInputDimsInfo(op_desc, format, input.shape_info, dims_info); + InputOutputDescInfo input; + ShapeDescription dims_info; + Format format = op_desc->GetInputDescPtr(0)->GetFormat(); + CreateInputDimsInfo(op_desc, format, input.shape_info, dims_info); - input.data_type = op_desc->GetInputDescPtr(0)->GetDataType(); - input.name = op_desc->GetName(); - int64_t input_size = 0; - GE_CHK_STATUS_RET(TensorUtils::GetSize(*op_desc->GetInputDescPtr(0), input_size), "get input size failed."); - input.size = input_size; - input_formats_.push_back(format); - input_descs_.push_back(input); + input.data_type = op_desc->GetInputDescPtr(0)->GetDataType(); + input.name = op_desc->GetName(); + int64_t input_size = 0; + GE_CHK_STATUS_RET(TensorUtils::GetSize(*op_desc->GetInputDescPtr(0), input_size), "get input size failed."); + input.size = input_size; + input_formats_.push_back(format); + input_descs_.push_back(input); - input.shape_info = dims_info; - input_descs_dims_.push_back(input); - } + input.shape_info = dims_info; + input_descs_dims_.push_back(input); return SUCCESS; } @@ -2066,37 +2065,31 @@ void DavinciModel::CreateOutput(uint32_t index, const OpDescPtr &op_desc, InputO output.data_type = op_desc->GetInputDescPtr(index)->GetDataType(); } -Status DavinciModel::InitOutputDescInfo(const vector &output_op_list) { - GELOGD("Output node size: %zu", output_op_list.size()); - vector out_node_name; - (void)ge::AttrUtils::GetListStr(ge_model_, ATTR_MODEL_OUT_NODES_NAME, out_node_name); - for (const auto &op_desc : output_op_list) { - uint32_t out_size = static_cast(op_desc->GetInputsSize()); - for (uint32_t index = 0; index < out_size; 
index++) { - string output_name; - InputOutputDescInfo output; - uint32_t format_result; - CreateOutput(index, op_desc, output, format_result); - - std::vector src_name = op_desc->GetSrcName(); - std::vector src_index = op_desc->GetSrcIndex(); - GE_CHK_BOOL_RET_STATUS(src_name.size() > index && src_index.size() > index, INTERNAL_ERROR, - "construct output_name failed."); - // forward compatbility, if old om has no out_node_name, need to return output follow origin way - if (out_size == out_node_name.size()) { - // neweast plan, the index will add to name during generate model. - bool contains_colon = out_node_name[index].find(":") != std::string::npos; - output_name = - contains_colon ? out_node_name[index] : out_node_name[index] + ":" + std::to_string(src_index[index]); - } else { - output_name = std::string("output_") + std::to_string(index) + "_" + src_name[index] + "_" + - std::to_string(src_index[index]); - } - output.name = output_name; - output_descs_.push_back(output); - output_formats_.push_back(format_result); +Status DavinciModel::InitOutputDescInfo(const OpDescPtr &op_desc, const vector &out_node_name) { + uint32_t out_size = static_cast(op_desc->GetInputsSize()); + for (uint32_t i = 0; i < out_size; ++i) { + string output_name; + InputOutputDescInfo output; + uint32_t format_result; + CreateOutput(i, op_desc, output, format_result); + + std::vector src_name = op_desc->GetSrcName(); + std::vector src_index = op_desc->GetSrcIndex(); + GE_CHK_BOOL_RET_STATUS(src_name.size() > i && src_index.size() > i, INTERNAL_ERROR, + "construct output_name failed."); + // forward compatbility, if old om has no out_node_name, need to return output follow origin way + if (out_size == out_node_name.size()) { + // neweast plan, the index will add to name during generate model. + bool contains_colon = out_node_name[i].find(":") != std::string::npos; + output_name = contains_colon ? 
out_node_name[i] : out_node_name[i] + ":" + std::to_string(src_index[i]); + } else { + output_name = string("output_") + std::to_string(i) + "_" + src_name[i] + "_" + std::to_string(src_index[i]); } + output.name = output_name; + output_descs_.push_back(output); + output_formats_.push_back(format_result); } + return SUCCESS; } diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index 6f0bcea0..53e9cd4d 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -831,7 +831,7 @@ class DavinciModel { void OpDebugUnRegister(); - void CheckHasHcomOp(); + void CheckHasHcomOp(const ComputeGraphPtr &graph); Status DoTaskSink(); @@ -854,8 +854,8 @@ class DavinciModel { Status InitOutputTensorInfo(const OpDescPtr &op_desc); Status GenOutputTensorInfo(OutputData *output_data, vector &outputs); - Status InitInputDescInfo(const map &data_by_index); - Status InitOutputDescInfo(const vector &output_op_list); + Status InitInputDescInfo(const OpDescPtr &op_desc); + Status InitOutputDescInfo(const OpDescPtr &op_desc, const vector &out_node_name); Status InitOrigInputInfo(uint32_t index, const OpDescPtr &op_desc); Status InitAippInfo(uint32_t index, const OpDescPtr &op_desc); From e331dee2b9b1bf2d4fe09487719f31cac17cd8fe Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 26 Jan 2021 10:59:53 +0800 Subject: [PATCH 11/13] Fix ut. 
--- tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc | 2 +- tests/ut/ge/graph/load/kernel_task_info_unittest.cc | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc b/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc index 7bc3faca..850a29ad 100644 --- a/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc +++ b/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc @@ -138,7 +138,7 @@ TEST_F(UtestKernelExTaskInfo, kernel_ex_task_info_calculate_args) { } TEST_F(UtestKernelExTaskInfo, kernel_ex_task_ext_info) { - const char ext_info[4] = {0, 0, 0, 4}; + const string ext_info = {1, 1, 1, 1, 0, 0, 0, 0}; const OpDescPtr op_desc = CreateOpDesc("FrameworkOp", "FrameworkOp"); KernelExTaskInfo kernel_ex_task_info; diff --git a/tests/ut/ge/graph/load/kernel_task_info_unittest.cc b/tests/ut/ge/graph/load/kernel_task_info_unittest.cc index c253666f..65783ac9 100644 --- a/tests/ut/ge/graph/load/kernel_task_info_unittest.cc +++ b/tests/ut/ge/graph/load/kernel_task_info_unittest.cc @@ -413,6 +413,9 @@ TEST_F(UtestKernelTaskInfo, init_kernel_taskInfo_with_aicpu_kernel_type_fail) { // rtMemcpy -> RT_ERROR_INVALID_VALUE EXPECT_EQ(kernel_task_info.Init(task_def, &model), SUCCESS); + const string ext_info = {1, 1, 1, 1, 0, 0, 0, 0}; + EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(ext_info), SUCCESS); + EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS); EXPECT_EQ(kernel_task_info.Release(), SUCCESS); @@ -1195,10 +1198,4 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_super_kernel_info) { EXPECT_EQ(kernel_task_info.SKTFinalize(), SUCCESS); } -TEST_F(UtestKernelTaskInfo, kernel_ask_ext_info) { - const char ext_info[4] = {0, 0, 0, 4}; - KernelTaskInfo kernel_task_info; - EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(ext_info), SUCCESS); -} - } // namespace ge From 5cdca396f0630412b33bfad75eb52a7cbba307da Mon Sep 17 00:00:00 2001 From: wangxiaotian22 Date: Tue, 26 Jan 2021 13:49:05 +0800 
Subject: [PATCH 12/13] move hccl_memcpy_pass back to preprocess --- ge/CMakeLists.txt | 2 + ge/ge_inference.mk | 1 + ge/ge_runner.mk | 1 + ge/graph/manager/graph_manager.cc | 7 +- .../passes/hccl_continuous_memcpy_pass.cc | 411 ++++++++++++++++++ ge/graph/passes/hccl_continuous_memcpy_pass.h | 59 +++ ge/graph/passes/hccl_memcpy_pass.cc | 95 +--- ge/graph/passes/hccl_memcpy_pass.h | 4 - ge/graph/preprocess/graph_preprocess.cc | 3 + tests/ut/ge/CMakeLists.txt | 1 + 10 files changed, 486 insertions(+), 98 deletions(-) create mode 100644 ge/graph/passes/hccl_continuous_memcpy_pass.cc create mode 100644 ge/graph/passes/hccl_continuous_memcpy_pass.h diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 888f565c..096c3a4b 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -258,6 +258,7 @@ set(TRAIN_SRC_LIST "graph/passes/get_original_format_pass.cc" "graph/passes/guarantee_const_pass.cc" "graph/passes/hccl_memcpy_pass.cc" + "graph/passes/hccl_continuous_memcpy_pass.cc" "graph/passes/identity_pass.cc" "graph/passes/ref_identity_delete_op_pass.cc" "graph/passes/infershape_pass.cc" @@ -595,6 +596,7 @@ set(INFER_SRC_LIST "graph/passes/cast_remove_pass.cc" "graph/passes/transpose_transdata_pass.cc" "graph/passes/hccl_memcpy_pass.cc" + "graph/passes/hccl_continuous_memcpy_pass.cc" "graph/passes/flow_ctrl_pass.cc" "graph/passes/global_step_insert_pass.cc" "graph/passes/link_gen_mask_nodes_pass.cc" diff --git a/ge/ge_inference.mk b/ge/ge_inference.mk index a20ff437..132141fc 100755 --- a/ge/ge_inference.mk +++ b/ge/ge_inference.mk @@ -212,6 +212,7 @@ OMG_HOST_SRC_FILES := \ graph/passes/cast_remove_pass.cc \ graph/passes/transpose_transdata_pass.cc \ graph/passes/hccl_memcpy_pass.cc \ + graph/passes/hccl_continuous_memcpy_pass.cc \ graph/passes/flow_ctrl_pass.cc \ graph/passes/global_step_insert_pass.cc \ graph/passes/link_gen_mask_nodes_pass.cc \ diff --git a/ge/ge_runner.mk b/ge/ge_runner.mk index 4434dc2b..de27ff31 100644 --- a/ge/ge_runner.mk +++ b/ge/ge_runner.mk 
@@ -183,6 +183,7 @@ LIBGE_LOCAL_SRC_FILES := \ graph/passes/get_original_format_pass.cc \ graph/passes/guarantee_const_pass.cc \ graph/passes/hccl_memcpy_pass.cc \ + graph/passes/hccl_continuous_memcpy_pass.cc \ graph/passes/identity_pass.cc \ graph/passes/ref_identity_delete_op_pass.cc \ graph/passes/infershape_pass.cc \ diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index 0d58e9c2..445510c9 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -92,7 +92,7 @@ #include "graph/passes/unused_args_clean_pass.h" #include "graph/passes/global_step_insert_pass.h" #include "graph/passes/memcpy_addr_async_pass.h" -#include "graph/passes/hccl_memcpy_pass.h" +#include "graph/passes/hccl_continuous_memcpy_pass.h" #include "graph/build/label_allocator.h" #include "graph/utils/tensor_adapter.h" #include "inc/pass_manager.h" @@ -2151,8 +2151,6 @@ Status GraphManager::OptimizeStage1(ge::ComputeGraphPtr &compute_graph) { new (std::nothrow) TransOpWithoutReshapeFusionPass)) GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::TransOpBreadthFusionPass", new (std::nothrow) TransOpBreadthFusionPass)) - GE_CHK_STATUS_RET( - after_merge_passes.AddPass("OptimizeStage1_1::HcclMemcpyPass", new (std::nothrow) HcclMemcpyPass)); GE_TIMESTAMP_START(after_merge_passes); auto ret = after_merge_passes.Run(compute_graph); @@ -2268,6 +2266,9 @@ Status GraphManager::OptimizeStage2(ge::ComputeGraphPtr &compute_graph) { GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage2::AfterMergePasses::LinkGenMaskNodesPass", new (std::nothrow) LinkGenMaskNodesPass(options_.stream_max_parallel_num))); + GE_CHK_STATUS_RET( + after_merge_passes.AddPass("OptimizeStage2::HcclContinuousMemcpyPass", + new (std::nothrow) HcclContinuousMemcpyPass)); GE_TIMESTAMP_START(after_merge_passes); auto ret = after_merge_passes.Run(compute_graph); diff --git a/ge/graph/passes/hccl_continuous_memcpy_pass.cc 
b/ge/graph/passes/hccl_continuous_memcpy_pass.cc new file mode 100644 index 00000000..7dd2fb06 --- /dev/null +++ b/ge/graph/passes/hccl_continuous_memcpy_pass.cc @@ -0,0 +1,411 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "graph/passes/hccl_continuous_memcpy_pass.h" + +#include + +#include "common/debug/log.h" +#include "framework/common/debug/ge_log.h" +#include "common/ge_inner_error_codes.h" +#include "common/ge/ge_util.h" +#include "framework/common/types.h" +#include "graph/utils/graph_utils.h" + +namespace { +const int kAnchorNum = 0; +const int32_t kAnchorAssignRefIndex = 0; +const int32_t kAnchorAssignValueIndex = 1; +} // namespace +namespace ge { +Status HcclContinuousMemcpyPass::Run(ge::ComputeGraphPtr graph) { + GE_CHECK_NOTNULL(graph); + for (const auto &node : graph->GetDirectNode()) { + auto op_desc = node->GetOpDesc(); + if (op_desc == nullptr) { + GELOGE(INTERNAL_ERROR, "node has no op_desc, node_name : %s.", node->GetName().c_str()); + return INTERNAL_ERROR; + } + + Status ret = ContinuousInputProcess(graph, node); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "failed ProcessBroadcastMemcpy, node_name:%s.", node->GetName().c_str()); + return ret; + } + + ret = P2pmemInputProcess(graph, node); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "failed P2pmemInputProcess, node_name:%s.", node->GetName().c_str()); + return ret; + } + + } + return SUCCESS; +} + +// If 
broadcast input size is bigger than 1, and input from variable, +// cause by broadcast input memory should be continuous, +// another featuremap mem will be allocated for broadcast input. +// In this condition, move data from variable mem to broadcast input featuremap mem will be executed each step. +// In order to avoid move action out of model, use memcpy node instead of move action code. +Status HcclContinuousMemcpyPass::ContinuousInputProcess(const ComputeGraphPtr &graph, const NodePtr node) { + auto op_desc = node->GetOpDesc(); + + bool is_input_continuous = false; + (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_CONTINUOUS_INPUT, is_input_continuous); + + if (is_input_continuous && op_desc->GetInputsSize() > 1) { + GELOGI("continuous input op is:%s.", op_desc->GetName().c_str()); + // if input size bigger than one, insert memcpy between var data for support continous mem alloc + for (auto &hccl_in_anchor : node->GetAllInDataAnchors()) { + if (hccl_in_anchor == nullptr) { + continue; + } + auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor(); + if (src_out_anchor == nullptr) { + GELOGE(INTERNAL_ERROR, "hcom op input has no peer anchor, node_name:%s", node->GetName().c_str()); + return INTERNAL_ERROR; + } + + if (IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) { + Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "Failed to modify the connection."); + return ret; + } + } + } + } + return SUCCESS; +} + +// if input is var type, and node input need p2p mem, then memcpy should be insert between the two +Status HcclContinuousMemcpyPass::P2pmemInputProcess(const ComputeGraphPtr &graph, const NodePtr node) { + auto op_desc = node->GetOpDesc(); + + vector input_memory_types; + (void) ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_INPUT_MEM_TYPE_LIST, input_memory_types); + + if (input_memory_types.empty()) { + return SUCCESS; + } + + for (uint32_t index = 0; index < 
input_memory_types.size() && index < op_desc->GetInputsSize(); index++) { + if (input_memory_types[index] != RT_MEMORY_P2P_DDR) { + continue; + } + + GELOGD("p2p input op is:%s.", op_desc->GetName().c_str()); + auto hccl_in_anchor = node->GetInDataAnchor(index); + if (hccl_in_anchor == nullptr) { + continue; + } + auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor(); + if (src_out_anchor == nullptr) { + GELOGE(INTERNAL_ERROR, "hcom op input has no peer anchor, node_name:%s", node->GetName().c_str()); + return INTERNAL_ERROR; + } + + if (IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) { + Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "Failed to modify the connection."); + return ret; + } + } + } + return SUCCESS; +} + +bool HcclContinuousMemcpyPass::IsDataNode(const std::string& node_type) { + return (node_type == CONSTANTOP) || (node_type == VARIABLE) || (node_type == DATA) || (node_type == CONSTANT); +} + +/// +/// @brief Add Identity Node +/// @param [in] ge::ComputeGraphPtr graph +/// @param [in] ge::OutDataAnchorPtr in_node +/// @return ge::NodePtr +/// +NodePtr HcclContinuousMemcpyPass::CreateIdentityNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor) { + GE_CHECK_NOTNULL_EXEC(graph, return nullptr); + NodePtr pre_node = out_data_anchor->GetOwnerNode(); + OpDescPtr pre_op_desc = pre_node->GetOpDesc(); + if (pre_op_desc == nullptr) { + GELOGE(INTERNAL_ERROR, "OpDesc of pre node is invalid."); + return nullptr; + } + + std::string node_name = pre_node->GetName() + "_" + IDENTITY; + node_name = CheckDuplicateName(node_name); + OpDescPtr op_desc = MakeShared(node_name.c_str(), IDENTITY); + if (op_desc == nullptr) { + GELOGE(INTERNAL_ERROR, "Create Identity op: MakeShared op_desc fail."); + return nullptr; + } + GELOGI("Create Identity op:%s.", op_desc->GetName().c_str()); + + graphStatus ret = op_desc->AddInputDesc("x", 
pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx())); + if (ret != GRAPH_SUCCESS) { + GELOGE(INTERNAL_ERROR, "Create Identity op: add input desc fail."); + return nullptr; + } + + ret = op_desc->AddOutputDesc("y", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx())); + if (ret != GRAPH_SUCCESS) { + GELOGE(INTERNAL_ERROR, "Create Identity op: add output desc fail."); + return nullptr; + } + // because history reason ,this pass can not do work after constant fold so mark it + (void)AttrUtils::SetBool(op_desc, ATTR_NO_NEED_CONSTANT_FOLDING, false); + + NodePtr memcpy_node = graph->AddNode(op_desc); + if (memcpy_node == nullptr) { + GELOGE(INTERNAL_ERROR, "Insert Identity node fail."); + return nullptr; + } + + return memcpy_node; +} + +/// +/// @brief Check duplicate node_name +/// @param [in] std::string& node_name +/// @return std::string +/// +std::string HcclContinuousMemcpyPass::CheckDuplicateName(const std::string &node_name) { + std::string tmp_name = node_name; + auto iter = node_num_map_.find(tmp_name); + if (iter != node_num_map_.end()) { + tmp_name = tmp_name + "_" + std::to_string(iter->second); + (iter->second)++; + } else { + node_num_map_[tmp_name] = 1; + } + return tmp_name; +} + +/// +/// @brief Modify edge connection +/// @param [in] ComputeGraphPtr graph +/// @param [in] OutDataAnchorPtr src_out_anchor +/// @param [in] InDataAnchorPtr hccl_in_anchor +/// @return status +/// +Status HcclContinuousMemcpyPass::ModifyEdgeConnection(const ComputeGraphPtr &graph, const OutDataAnchorPtr &src_out_anchor, + const InDataAnchorPtr &hccl_in_anchor) { + GE_CHECK_NOTNULL(src_out_anchor->GetOwnerNode()); + GE_CHECK_NOTNULL(hccl_in_anchor->GetOwnerNode()); + + Status ret = InsertIdentityBeforeHccl(graph, src_out_anchor, hccl_in_anchor); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "add identity failed, var_node:%s, hccl_node:%s.", + src_out_anchor->GetOwnerNode()->GetName().c_str(), + hccl_in_anchor->GetOwnerNode()->GetName().c_str()); + return ret; + } + + 
ret = InsertAssignAfterBroadcastIfNeed(graph, src_out_anchor, hccl_in_anchor); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "add assign failed, var_node:%s, hccl_node:%s.", + src_out_anchor->GetOwnerNode()->GetName().c_str(), + hccl_in_anchor->GetOwnerNode()->GetName().c_str()); + return ret; + } + return SUCCESS; +} + +/// +/// @brief Insert Identity node Between Hccl node and variable +/// @param [in] ComputeGraphPtr graph +/// @param [in] OutDataAnchorPtr src_out_anchor +/// @param [in] InDataAnchorPtr hccl_in_anchor +/// @return status +/// +Status HcclContinuousMemcpyPass::InsertIdentityBeforeHccl(const ComputeGraphPtr &graph, const OutDataAnchorPtr &src_out_anchor, + const InDataAnchorPtr &hccl_in_anchor) { + GELOGI("Between op %s and op %s need insert memcpy async op.", src_out_anchor->GetOwnerNode()->GetName().c_str(), + hccl_in_anchor->GetOwnerNode()->GetName().c_str()); + NodePtr memcpy_node = CreateIdentityNode(graph, src_out_anchor); + GE_CHECK_NOTNULL(memcpy_node); + + Status ret1 = src_out_anchor->Unlink(hccl_in_anchor); + if (ret1 != SUCCESS) { + GELOGE(INTERNAL_ERROR, "The op %s Unlink anchor %s fail.", src_out_anchor->GetOwnerNode()->GetName().c_str(), + hccl_in_anchor->GetOwnerNode()->GetName().c_str()); + return FAILED; + } + auto out_data_anchor_0 = memcpy_node->GetOutDataAnchor(kAnchorNum); + GE_CHECK_NOTNULL(out_data_anchor_0); + ret1 = out_data_anchor_0->LinkTo(hccl_in_anchor); + if (ret1 != SUCCESS) { + GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", memcpy_node->GetName().c_str(), + hccl_in_anchor->GetOwnerNode()->GetName().c_str()); + return FAILED; + } + + Status ret = src_out_anchor->LinkTo(memcpy_node->GetInDataAnchor(kAnchorNum)); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", src_out_anchor->GetOwnerNode()->GetName().c_str(), + memcpy_node->GetName().c_str()); + return FAILED; + } + return SUCCESS; +} + +/// +/// @brief Insert assign node after broadcast node and variable to refresh 
variable data +/// @param [in] ComputeGraphPtr graph +/// @param [in] OutDataAnchorPtr var_out_anchor +/// @param [in] InDataAnchorPtr hccl_in_anchor +/// @return status +/// +Status HcclContinuousMemcpyPass::InsertAssignAfterBroadcastIfNeed(const ComputeGraphPtr &graph, + const OutDataAnchorPtr &var_out_anchor, + const InDataAnchorPtr &hccl_in_anchor) { + if (hccl_in_anchor->GetOwnerNode()->GetType() != HCOMBROADCAST) { + GELOGD("%s not broadcast, no need to insert assign node", hccl_in_anchor->GetOwnerNode()->GetName().c_str()); + return SUCCESS; + } + + if (var_out_anchor->GetOwnerNode()->GetType() != VARIABLE) { + GELOGD("%s not variable, no need to insert assign node", var_out_anchor->GetOwnerNode()->GetName().c_str()); + return SUCCESS; + } + + GELOGI("after op %s and op %s need insert assign op.", var_out_anchor->GetOwnerNode()->GetName().c_str(), + hccl_in_anchor->GetOwnerNode()->GetName().c_str()); + + for (auto peer_in_anchor : var_out_anchor->GetPeerInDataAnchors()) { + if (peer_in_anchor->GetOwnerNode()->GetType() == ASSIGN) { + GELOGD("variable %s out assign node is exist.", var_out_anchor->GetOwnerNode()->GetName().c_str()); + return SUCCESS; + } + } + + NodePtr assign_node = CreateAssignNode(graph, var_out_anchor); + GE_CHECK_NOTNULL(assign_node); + + OutDataAnchorPtr hccl_out_anchor = hccl_in_anchor->GetOwnerNode()->GetOutDataAnchor(hccl_in_anchor->GetIdx()); + GE_CHECK_NOTNULL(hccl_out_anchor); + + Status ret = hccl_out_anchor->LinkTo(assign_node->GetInDataAnchor(kAnchorAssignValueIndex)); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", hccl_out_anchor->GetOwnerNode()->GetName().c_str(), + assign_node->GetName().c_str()); + return FAILED; + } + + ret = var_out_anchor->LinkTo(assign_node->GetInDataAnchor(kAnchorAssignRefIndex)); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", var_out_anchor->GetOwnerNode()->GetName().c_str(), + assign_node->GetName().c_str()); + return FAILED; + } 
+ + // add control edge between assign node and node after broadcast node + OutControlAnchorPtr assign_out_control_anchor = assign_node->GetOutControlAnchor(); + GE_CHECK_NOTNULL(assign_out_control_anchor); + + for (auto in_data_anchor : hccl_out_anchor->GetPeerInDataAnchors()) { + if (in_data_anchor->GetOwnerNode()->GetName() == assign_node->GetName()) { + continue; + } + ret = assign_out_control_anchor->LinkTo(in_data_anchor->GetOwnerNode()->GetInControlAnchor()); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "The op %s link control anchor %s fail.", + assign_out_control_anchor->GetOwnerNode()->GetName().c_str(), + in_data_anchor->GetOwnerNode()->GetName().c_str()); + return FAILED; + } + } + + for (auto in_control_anchor : hccl_out_anchor->GetOwnerNode()->GetOutControlAnchor()->GetPeerInControlAnchors()) { + if (in_control_anchor->GetOwnerNode()->GetName() == assign_node->GetName()) { + continue; + } + ret = assign_out_control_anchor->LinkTo(in_control_anchor); + if (ret != SUCCESS) { + GELOGE(INTERNAL_ERROR, "The op %s link control anchor %s fail.", + assign_out_control_anchor->GetOwnerNode()->GetName().c_str(), + in_control_anchor->GetOwnerNode()->GetName().c_str()); + return FAILED; + } + } + return SUCCESS; +} + +/// +/// @brief create assign Node, add to graph +/// @param [in] ge::ComputeGraphPtr graph +/// @param [in] ge::OutDataAnchorPtr variable node out anchor +/// @return ge::NodePtr +/// +NodePtr HcclContinuousMemcpyPass::CreateAssignNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor) { + GE_CHECK_NOTNULL_EXEC(graph , return nullptr); + NodePtr pre_node = out_data_anchor->GetOwnerNode(); + OpDescPtr pre_op_desc = pre_node->GetOpDesc(); + if (pre_op_desc == nullptr) { + GELOGE(INTERNAL_ERROR, "OpDesc of pre node is invalid."); + return nullptr; + } + + std::string node_name = pre_node->GetName() + "_" + ASSIGN; + node_name = CheckDuplicateName(node_name); + OpDescPtr op_desc = MakeShared(node_name.c_str(), ASSIGN); + if 
(op_desc == nullptr) {
+    GELOGE(INTERNAL_ERROR, "Create Assign op: MakeShared op_desc fail.");
+    return nullptr;
+  }
+  GELOGI("Create Assign op:%s.", op_desc->GetName().c_str());
+
+  graphStatus ret = op_desc->AddInputDesc("ref", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
+  if (ret != GRAPH_SUCCESS) {
+    GELOGE(INTERNAL_ERROR, "Create Assign op: add ref input desc fail.");
+    return nullptr;
+  }
+
+  ret = op_desc->AddInputDesc("value", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
+  if (ret != GRAPH_SUCCESS) {
+    GELOGE(INTERNAL_ERROR, "Create Assign op: add value input desc fail.");
+    return nullptr;
+  }
+
+  ret = op_desc->AddOutputDesc("ref", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
+  if (ret != GRAPH_SUCCESS) {
+    GELOGE(INTERNAL_ERROR, "Create Assign op: add output desc fail.");
+    return nullptr;
+  }
+
+  NodePtr assign_node = graph->AddNode(op_desc);
+  if (assign_node == nullptr) {
+    GELOGE(INTERNAL_ERROR, "Insert Assign node fail.");
+    return nullptr;
+  }
+
+  return assign_node;
+}
+
+
+///
+/// @brief Clear Status, used for subgraph pass
+/// @return SUCCESS
+///
+Status HcclContinuousMemcpyPass::ClearStatus() {
+  node_num_map_.clear();
+  return SUCCESS;
+}
+}  // namespace ge
diff --git a/ge/graph/passes/hccl_continuous_memcpy_pass.h b/ge/graph/passes/hccl_continuous_memcpy_pass.h
new file mode 100644
index 00000000..0a21c896
--- /dev/null
+++ b/ge/graph/passes/hccl_continuous_memcpy_pass.h
@@ -0,0 +1,59 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GE_GRAPH_PASSES_HCCL_CONTINUOUS_MEMCPY_PASS_H_
+#define GE_GRAPH_PASSES_HCCL_CONTINUOUS_MEMCPY_PASS_H_
+
+#include <string>
+#include <unordered_map>
+
+#include "graph/graph.h"
+#include "inc/graph_pass.h"
+
+namespace ge {
+class HcclContinuousMemcpyPass : public GraphPass {
+ public:
+  Status Run(ge::ComputeGraphPtr graph);
+  Status ClearStatus() override;
+
+ private:
+  NodePtr CreateIdentityNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor);
+
+  NodePtr CreateAssignNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor);
+
+  std::string CheckDuplicateName(const std::string &node_name);
+
+  Status ModifyEdgeConnection(const ComputeGraphPtr &graph, const OutDataAnchorPtr &src_out_anchor,
+                              const InDataAnchorPtr &hccl_in_anchor);
+
+  Status InsertIdentityBeforeHccl(const ComputeGraphPtr &graph, const OutDataAnchorPtr &src_out_anchor,
+                                  const InDataAnchorPtr &hccl_in_anchor);
+
+  Status InsertAssignAfterBroadcastIfNeed(const ComputeGraphPtr &graph,
+                                          const OutDataAnchorPtr &src_out_anchor,
+                                          const InDataAnchorPtr &hccl_in_anchor);
+
+  Status ContinuousInputProcess(const ComputeGraphPtr &graph, const NodePtr node);
+
+  Status P2pmemInputProcess(const ComputeGraphPtr &graph, const NodePtr node);
+
+  bool IsDataNode(const std::string& node_type);
+
+  std::unordered_map<std::string, uint32_t> node_num_map_;
+};
+}  // namespace ge
+
+#endif  // GE_GRAPH_PASSES_HCCL_CONTINUOUS_MEMCPY_PASS_H_
diff --git a/ge/graph/passes/hccl_memcpy_pass.cc b/ge/graph/passes/hccl_memcpy_pass.cc
index 3f607f84..537920b7 100755
--- a/ge/graph/passes/hccl_memcpy_pass.cc
+++ b/ge/graph/passes/hccl_memcpy_pass.cc
@@ -34,7 +34,7 @@ const char *const kInputMutable = "_input_mutable";
 }  // namespace
 namespace ge {
 Status HcclMemcpyPass::Run(ge::ComputeGraphPtr graph) {
-  GE_IF_BOOL_EXEC(graph == nullptr, GELOGE(PARAM_INVALID, "param [graph] must not be null."); return PARAM_INVALID);
+
GE_CHECK_NOTNULL(graph); for (const auto &node : graph->GetDirectNode()) { auto op_desc = node->GetOpDesc(); if (op_desc == nullptr) { @@ -42,24 +42,11 @@ Status HcclMemcpyPass::Run(ge::ComputeGraphPtr graph) { return INTERNAL_ERROR; } - Status ret = ContinuousInputProcess(graph, node); - if (ret != SUCCESS) { - GELOGE(INTERNAL_ERROR, "failed ProcessBroadcastMemcpy, node_name:%s.", node->GetName().c_str()); - return ret; - } - - ret = MutableInputProcess(graph, node); + Status ret = MutableInputProcess(graph, node); if (ret != SUCCESS) { GELOGE(INTERNAL_ERROR, "failed MutableInputProcess, node_name:%s.", node->GetName().c_str()); return ret; } - - ret = P2pmemInputProcess(graph, node); - if (ret != SUCCESS) { - GELOGE(INTERNAL_ERROR, "failed P2pmemInputProcess, node_name:%s.", node->GetName().c_str()); - return ret; - } - } return SUCCESS; } @@ -114,80 +101,6 @@ Status HcclMemcpyPass::MutableInputProcess(const ComputeGraphPtr &graph, const N return SUCCESS; } -// If broadcast input size is bigger than 1, and input from variable, -// cause by broadcast input memory should be continuous, -// another featuremap mem will be allocated for broadcast input. -// In this condition, move data from variable mem to broadcast input featuremap mem will be executed each step. -// In order to avoid move action out of model, use memcpy node instead of move action code. 
-Status HcclMemcpyPass::ContinuousInputProcess(const ComputeGraphPtr &graph, const NodePtr node) { - auto op_desc = node->GetOpDesc(); - - bool is_input_continuous = false; - (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_CONTINUOUS_INPUT, is_input_continuous); - - if (is_input_continuous && op_desc->GetInputsSize() > 1) { - GELOGI("continuous input op is:%s.", op_desc->GetName().c_str()); - // if input size bigger than one, insert memcpy between var data for support continous mem alloc - for (auto &hccl_in_anchor : node->GetAllInDataAnchors()) { - if (hccl_in_anchor == nullptr) { - continue; - } - auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor(); - if (src_out_anchor == nullptr) { - GELOGE(INTERNAL_ERROR, "hcom op input has no peer anchor, node_name:%s", node->GetName().c_str()); - return INTERNAL_ERROR; - } - - if (IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) { - Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor); - if (ret != SUCCESS) { - GELOGE(INTERNAL_ERROR, "Failed to modify the connection."); - return ret; - } - } - } - } - return SUCCESS; -} - -// if input is var type, and node input need p2p mem, then memcpy should be insert between the two -Status HcclMemcpyPass::P2pmemInputProcess(const ComputeGraphPtr &graph, const NodePtr node) { - auto op_desc = node->GetOpDesc(); - - vector input_memory_types; - (void) ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_INPUT_MEM_TYPE_LIST, input_memory_types); - - if (input_memory_types.empty()) { - return SUCCESS; - } - - for (uint32_t index = 0; index < input_memory_types.size() && index < op_desc->GetInputsSize(); index++) { - if (input_memory_types[index] != RT_MEMORY_P2P_DDR) { - continue; - } - - GELOGD("p2p input op is:%s.", op_desc->GetName().c_str()); - auto hccl_in_anchor = node->GetInDataAnchor(index); - if (hccl_in_anchor == nullptr) { - continue; - } - auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor(); - if (src_out_anchor == nullptr) { - GELOGE(INTERNAL_ERROR, 
"hcom op input has no peer anchor, node_name:%s", node->GetName().c_str()); - return INTERNAL_ERROR; - } - - if (IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) { - Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor); - if (ret != SUCCESS) { - GELOGE(INTERNAL_ERROR, "Failed to modify the connection."); - return ret; - } - } - } - return SUCCESS; -} - bool HcclMemcpyPass::IsDataNode(const std::string& node_type) { return (node_type == CONSTANTOP) || (node_type == VARIABLE) || (node_type == DATA) || (node_type == CONSTANT); } @@ -199,7 +112,7 @@ bool HcclMemcpyPass::IsDataNode(const std::string& node_type) { /// @return ge::NodePtr /// NodePtr HcclMemcpyPass::CreateIdentityNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor) { - GE_IF_BOOL_EXEC(graph == nullptr, return nullptr); + GE_CHECK_NOTNULL_EXEC(graph, return nullptr); NodePtr pre_node = out_data_anchor->GetOwnerNode(); OpDescPtr pre_op_desc = pre_node->GetOpDesc(); if (pre_op_desc == nullptr) { @@ -413,7 +326,7 @@ Status HcclMemcpyPass::InsertAssignAfterBroadcastIfNeed(const ComputeGraphPtr &g /// @return ge::NodePtr /// NodePtr HcclMemcpyPass::CreateAssignNode(const ComputeGraphPtr &graph, const OutDataAnchorPtr &out_data_anchor) { - GE_IF_BOOL_EXEC(graph == nullptr, return nullptr); + GE_CHECK_NOTNULL_EXEC(graph, return nullptr); NodePtr pre_node = out_data_anchor->GetOwnerNode(); OpDescPtr pre_op_desc = pre_node->GetOpDesc(); if (pre_op_desc == nullptr) { diff --git a/ge/graph/passes/hccl_memcpy_pass.h b/ge/graph/passes/hccl_memcpy_pass.h index 98e05964..feea82d9 100755 --- a/ge/graph/passes/hccl_memcpy_pass.h +++ b/ge/graph/passes/hccl_memcpy_pass.h @@ -46,12 +46,8 @@ class HcclMemcpyPass : public GraphPass { const OutDataAnchorPtr &src_out_anchor, const InDataAnchorPtr &hccl_in_anchor); - Status ContinuousInputProcess(const ComputeGraphPtr &graph, const NodePtr node); - Status MutableInputProcess(const ComputeGraphPtr &graph, const NodePtr node); - 
Status P2pmemInputProcess(const ComputeGraphPtr &graph, const NodePtr node); - bool IsDataNode(const std::string& node_type); std::unordered_map node_num_map_; diff --git a/ge/graph/preprocess/graph_preprocess.cc b/ge/graph/preprocess/graph_preprocess.cc index 787a28cf..5456a662 100644 --- a/ge/graph/preprocess/graph_preprocess.cc +++ b/ge/graph/preprocess/graph_preprocess.cc @@ -48,6 +48,7 @@ #include "graph/passes/enter_pass.h" #include "graph/passes/for_pass.h" #include "graph/passes/guarantee_const_pass.h" +#include "graph/passes/hccl_memcpy_pass.h" #include "graph/passes/hccl_group_pass.h" #include "graph/passes/identity_pass.h" #include "graph/passes/infershape_pass.h" @@ -1891,6 +1892,8 @@ Status GraphPrepare::PrepareOptimize() { PassManager graph_pass; try { (void)graph_pass.AddPass("PrepareOptimize::PrunePass", new PrunePass); + // can't move to optimize1/2 directly, may cause more identity insert, cause CI fail + (void)graph_pass.AddPass("PrepareOptimize::HcclMemcpyPass", new HcclMemcpyPass); } catch (std::bad_alloc &e) { GELOGE(INTERNAL_ERROR, "Add pass failed, bad memory allocation occurs."); return INTERNAL_ERROR; diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index abff433c..50d06fee 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -243,6 +243,7 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/passes/cast_remove_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/transpose_transdata_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/hccl_memcpy_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/hccl_continuous_memcpy_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/flow_ctrl_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/global_step_insert_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/link_gen_mask_nodes_pass.cc" From 9e0738fb79b506916e5332c9c88234802960a0aa Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Tue, 26 Jan 2021 15:29:59 +0800 Subject: [PATCH 13/13] Enable -Werror=format --- ge/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 888f565c..12af76ec 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -707,7 +707,7 @@ target_compile_options(ge_runner PRIVATE -O2 -fno-common $<$:-Werror=unused-variable> - $<$:-Werror=unused-const-variable> + $<$:-Werror=unused-const-variable -Werror=format> ) target_include_directories(ge_runner SYSTEM PRIVATE @@ -776,7 +776,7 @@ target_compile_options(ge_compiler PRIVATE -O2 -fno-common $<$:-Werror=unused-variable> - $<$:-Werror=unused-const-variable> + $<$:-Werror=unused-const-variable -Werror=format> ) target_include_directories(ge_compiler SYSTEM PRIVATE