| @@ -23,7 +23,7 @@ export BUILD_PATH="${BASEPATH}/build/" | |||
| usage() | |||
| { | |||
| echo "Usage:" | |||
| echo "sh build.sh [-j[n]] [-h] [-v] [-s] [-t] [-u] [-c] [-p]" | |||
| echo "sh build.sh [-j[n]] [-h] [-v] [-s] [-t] [-u] [-c] [-S on|off]" | |||
| echo "" | |||
| echo "Options:" | |||
| echo " -h Print usage" | |||
| @@ -34,9 +34,21 @@ usage() | |||
| echo " -c Build ut with coverage tag" | |||
| echo " -p Build inference or train" | |||
| echo " -v Display build command" | |||
| echo " -S Enable enable download cmake compile dependency from gitee , default off" | |||
| echo "to be continued ..." | |||
| } | |||
| # check that the value of an option is 'on' or 'off' | |||
| # usage: check_on_off arg_value arg_name | |||
| check_on_off() | |||
| { | |||
| if [[ "X$1" != "Xon" && "X$1" != "Xoff" ]]; then | |||
| echo "Invalid value $1 for option -$2" | |||
| usage | |||
| exit 1 | |||
| fi | |||
| } | |||
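| # Illustrative behaviour of check_on_off (example only, not part of the patch): | |||
| #   check_on_off on S      # value accepted, function returns normally | |||
| #   check_on_off true S    # prints "Invalid value true for option -S", shows usage, exits 1 | |||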
| # parse and set options | |||
| checkopts() | |||
| { | |||
| @@ -49,8 +61,9 @@ checkopts() | |||
| GE_ONLY="on" | |||
| PLATFORM="inference" | |||
| PRODUCT="normal" | |||
| ENABLE_GITEE="off" | |||
| # Process the options | |||
| while getopts 'ustchj:p:g:v' opt | |||
| while getopts 'ustchj:p:g:vS:' opt | |||
| do | |||
| OPTARG=$(echo ${OPTARG} | tr '[A-Z]' '[a-z]') | |||
| case "${opt}" in | |||
| @@ -86,6 +99,11 @@ checkopts() | |||
| g) | |||
| PRODUCT=$OPTARG | |||
| ;; | |||
| S) | |||
| check_on_off $OPTARG S | |||
| ENABLE_GITEE="$OPTARG" | |||
| echo "enable download from gitee" | |||
| ;; | |||
| *) | |||
| echo "Undefined option: ${opt}" | |||
| usage | |||
| @@ -129,6 +147,9 @@ build_graphengine() | |||
| CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_GE_ST=ON" | |||
| fi | |||
| if [[ "X$ENABLE_GITEE" = "Xon" ]]; then | |||
| CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_GITEE=ON" | |||
| fi | |||
| CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_OPEN_SRC=True -DCMAKE_INSTALL_PREFIX=${OUTPUT_PATH} -DPLATFORM=${PLATFORM} -DPRODUCT=${PRODUCT}" | |||
| echo "${CMAKE_ARGS}" | |||
| cmake ${CMAKE_ARGS} .. | |||
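| # Example invocations of the new switch (illustrative, not part of the patch); -S simply | |||
| # forwards ENABLE_GITEE into CMake so the gitee mirror URLs below are selected: | |||
| #   sh build.sh -j8 -S on    # CMAKE_ARGS gains -DENABLE_GITEE=ON, dependencies come from gitee | |||
| #   sh build.sh -j8          # default ENABLE_GITEE=off, dependencies come from github | |||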
| @@ -11,8 +11,16 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR | |||
| endif() | |||
| set (gflags_CXXFLAGS "-D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=ascend_private") | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/gflags/repository/archive/v2.2.2.tar.gz") | |||
| set(MD5 "") | |||
| else() | |||
| set(REQ_URL "https://github.com/gflags/gflags/archive/v2.2.2.tar.gz") | |||
| set(MD5 "") | |||
| endif () | |||
| ExternalProject_Add(gflags_build | |||
| URL https://github.com/gflags/gflags/archive/v2.2.2.tar.gz | |||
| URL ${REQ_URL} | |||
| #URL /home/txd/workspace/linux_cmake/pkg/protobuf-3.8.0.tar.gz | |||
| #SOURCE_DIR ${METADEF_DIR}/../../third_party/gflags/src/gflags-2.2.2 | |||
| CONFIGURE_COMMAND ${CMAKE_COMMAND} -DCMAKE_CXX_FLAGS=${gflags_CXXFLAGS} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}/gflags <SOURCE_DIR> | |||
| @@ -5,8 +5,17 @@ endif() | |||
| include(ExternalProject) | |||
| set(JSON_SRC_DIR ${CMAKE_BINARY_DIR}/opensrc/json/include) | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/JSON-for-Modern-CPP/repository/archive/v3.6.1.zip") | |||
| set(MD5 "5bda78ce308e6cfcf614dcf1d5ff27a7") | |||
| set(JSON_INCLUDE_DIR "${JSON_SRC_DIR}/include") | |||
| else() | |||
| set(REQ_URL "https://github.com/nlohmann/json/releases/download/v3.6.1/include.zip") | |||
| set(MD5 "0dc903888211db3a0f170304cd9f3a89") | |||
| set(JSON_INCLUDE_DIR ${JSON_SRC_DIR}) | |||
| endif () | |||
| ExternalProject_Add(json_build | |||
| URL https://github.com/nlohmann/json/releases/download/v3.6.1/include.zip | |||
| URL ${REQ_URL} | |||
| #URL /home/txd/workspace/cloud_code/pkg/include.zip | |||
| SOURCE_DIR ${JSON_SRC_DIR} | |||
| CONFIGURE_COMMAND "" | |||
| @@ -17,7 +26,7 @@ ExternalProject_Add(json_build | |||
| add_library(json INTERFACE) | |||
| target_include_directories(json INTERFACE ${JSON_SRC_DIR}) | |||
| target_include_directories(json INTERFACE ${JSON_INCLUDE_DIR}) | |||
| add_dependencies(json json_build) | |||
| #set(HAVE_JSON TRUE CACHE BOOL "json build add") | |||
| @@ -6,8 +6,16 @@ set(ONNX_PROTO_DIR ${CMAKE_BINARY_DIR}/onnx) | |||
| set(ONNX_PROTO_FILE ${ONNX_PROTO_DIR}/onnx.proto) | |||
| file(MAKE_DIRECTORY ${ONNX_PROTO_DIR}) | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/ONNX/repository/archive/v1.6.0.tar.gz") | |||
| set(MD5 "1bdbcecdd68ea8392630467646776e02") | |||
| else() | |||
| set(REQ_URL "https://github.com/onnx/onnx/releases/download/v1.6.0/onnx-1.6.0.tar.gz") | |||
| set(MD5 "512f2779d6215d4a36f366b6b9acdf1e") | |||
| endif () | |||
| ExternalProject_Add(onnx | |||
| URL https://github.com/onnx/onnx/releases/download/v1.6.0/onnx-1.6.0.tar.gz | |||
| URL ${REQ_URL} | |||
| #URL /home/txd/workspace/cloud_code/pkg/onnx-1.6.0.tar.gz | |||
| #URL_HASH SHA256=3b88c3fe521151651a0403c4d131cb2e0311bd28b753ef692020a432a81ce345 | |||
| #SOURCE_DIR ${ONNX_SRC_DIR} | |||
| @@ -11,6 +11,13 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR | |||
| message(STATUS "No install prefix selected, default to ${CMAKE_INSTALL_PREFIX}.") | |||
| endif() | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz") | |||
| set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236") | |||
| else() | |||
| set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz") | |||
| set(MD5 "3d9e32700639618a4d2d342c99d4507a") | |||
| endif () | |||
| set(protobuf_CXXFLAGS "-Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fstack-protector-all -D_FORTIFY_SOURCE=2 -D_GLIBCXX_USE_CXX11_ABI=0 -O2 -Dgoogle=ascend_private") | |||
| set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") | |||
| ExternalProject_Add(protobuf_build | |||
| @@ -8,11 +8,18 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR | |||
| message(STATUS "No install prefix selected, default to ${CMAKE_INSTALL_PREFIX}.") | |||
| endif() | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz") | |||
| set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236") | |||
| else() | |||
| set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz") | |||
| set(MD5 "3d9e32700639618a4d2d342c99d4507a") | |||
| endif () | |||
| set(protobuf_CXXFLAGS "-Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fstack-protector-all -D_FORTIFY_SOURCE=2 -D_GLIBCXX_USE_CXX11_ABI=0 -O2 -Dgoogle=ascend_private") | |||
| set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") | |||
| set(PROTOBUF_STATIC_PKG_DIR ${CMAKE_INSTALL_PREFIX}/protobuf_static) | |||
| ExternalProject_Add(protobuf_static_build | |||
| URL https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz | |||
| URL ${REQ_URL} | |||
| #URL /home/txd/workspace/linux_cmake/pkg/protobuf-3.8.0.tar.gz | |||
| #SOURCE_DIR ${GE_CODE_DIR}/../../third_party/protobuf/src/protobuf-3.8.0 | |||
| CONFIGURE_COMMAND ${CMAKE_COMMAND} | |||
| @@ -12,10 +12,19 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR | |||
| message(STATUS "No install prefix selected, default to ${CMAKE_INSTALL_PREFIX}.") | |||
| endif() | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz") | |||
| set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236") | |||
| else() | |||
| set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz") | |||
| set(MD5 "3d9e32700639618a4d2d342c99d4507a") | |||
| endif () | |||
| set(protobuf_CXXFLAGS "-Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fstack-protector-all -D_FORTIFY_SOURCE=2 -D_GLIBCXX_USE_CXX11_ABI=0 -O2") | |||
| set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") | |||
| ExternalProject_Add(protoc_build | |||
| URL https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz | |||
| URL ${REQ_URL} | |||
| #URL /home/txd/workspace/linux_cmake/pkg/protobuf-3.8.0.tar.gz | |||
| #SOURCE_DIR ${GE_CODE_DIR}/../third_party/protobuf/src/protobuf-3.8.0 | |||
| CONFIGURE_COMMAND ${CMAKE_COMMAND} -Dprotobuf_WITH_ZLIB=OFF -Dprotobuf_BUILD_TESTS=OFF -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS=${protobuf_CXXFLAGS} -DCMAKE_CXX_LDFLAGS=${protobuf_LDFLAGS} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}/protoc <SOURCE_DIR>/cmake | |||
| @@ -239,6 +239,7 @@ set(TRAIN_SRC_LIST | |||
| "graph/passes/switch_data_edges_bypass.cc" | |||
| "graph/passes/switch_logic_remove_pass.cc" | |||
| "graph/passes/merge_to_stream_merge_pass.cc" | |||
| "graph/passes/merge_input_memcpy_pass.cc" | |||
| "graph/passes/switch_to_stream_switch_pass.cc" | |||
| "graph/passes/attach_stream_label_pass.cc" | |||
| "graph/passes/switch_dead_branch_elimination.cc" | |||
| @@ -559,6 +560,7 @@ set(INFER_SRC_LIST | |||
| "graph/passes/cast_translate_pass.cc" | |||
| "graph/passes/prune_pass.cc" | |||
| "graph/passes/merge_to_stream_merge_pass.cc" | |||
| "graph/passes/merge_input_memcpy_pass.cc" | |||
| "graph/passes/switch_to_stream_switch_pass.cc" | |||
| "graph/passes/attach_stream_label_pass.cc" | |||
| "graph/passes/multi_batch_pass.cc" | |||
| @@ -177,6 +177,7 @@ OMG_HOST_SRC_FILES := \ | |||
| graph/passes/cast_translate_pass.cc \ | |||
| graph/passes/prune_pass.cc \ | |||
| graph/passes/merge_to_stream_merge_pass.cc \ | |||
| graph/passes/merge_input_memcpy_pass.cc \ | |||
| graph/passes/switch_to_stream_switch_pass.cc \ | |||
| graph/passes/attach_stream_label_pass.cc \ | |||
| graph/passes/multi_batch_pass.cc \ | |||
| @@ -212,6 +212,7 @@ LIBGE_LOCAL_SRC_FILES := \ | |||
| graph/passes/switch_data_edges_bypass.cc \ | |||
| graph/passes/switch_logic_remove_pass.cc \ | |||
| graph/passes/merge_to_stream_merge_pass.cc \ | |||
| graph/passes/merge_input_memcpy_pass.cc \ | |||
| graph/passes/switch_to_stream_switch_pass.cc \ | |||
| graph/passes/attach_stream_label_pass.cc \ | |||
| graph/passes/switch_dead_branch_elimination.cc \ | |||
| @@ -69,6 +69,7 @@ | |||
| #include "graph/passes/link_gen_mask_nodes_pass.h" | |||
| #include "graph/passes/mark_graph_unknown_status_pass.h" | |||
| #include "graph/passes/merge_pass.h" | |||
| #include "graph/passes/merge_input_memcpy_pass.h" | |||
| #include "graph/passes/merge_to_stream_merge_pass.h" | |||
| #include "graph/passes/multi_batch_pass.h" | |||
| #include "graph/passes/next_iteration_pass.h" | |||
| @@ -1960,6 +1961,8 @@ Status GraphManager::OptimizeStage1(ge::ComputeGraphPtr &compute_graph) { | |||
| GELOGI("get ge.exec.variable_acc failed. set default value."); | |||
| } | |||
| PassManager after_merge_passes; | |||
| GE_CHK_STATUS_RET( | |||
| after_merge_passes.AddPass("OptimizeStage1_1::MergeInputMemcpyPass", new (std::nothrow) MergeInputMemcpyPass)); | |||
| GE_CHK_STATUS_RET( | |||
| after_merge_passes.AddPass("OptimizeStage1_1::SwitchDataEdgesBypass", new (std::nothrow) SwitchDataEdgesBypass)); | |||
| GE_CHK_STATUS_RET( | |||
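| // Resulting head of the OptimizeStage1_1 pass list after this change (illustrative summary): | |||
| //   1. MergeInputMemcpyPass   (new: inserts MemcpyAsync/MemcpyAddrAsync on Merge data inputs) | |||
| //   2. SwitchDataEdgesBypass | |||
| //   3. ... remaining after_merge_passes unchanged | |||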
| @@ -0,0 +1,98 @@ | |||
| /** | |||
| * Copyright 2019-2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "graph/passes/merge_input_memcpy_pass.h" | |||
| #include "common/ge/ge_util.h" | |||
| #include "ge/ge_api_types.h" | |||
| #include "graph/common/omg_util.h" | |||
| namespace ge { | |||
| Status MergeInputMemcpyPass::Run(ComputeGraphPtr graph) { | |||
| GELOGD("MergeInputMemcpyPass Enter"); | |||
| for (const auto &node : graph->GetDirectNode()) { | |||
| if ((node->GetType() != MERGE) && (node->GetType() != REFMERGE)) { | |||
| continue; | |||
| } | |||
| GE_CHECK_NOTNULL(node->GetOpDesc()); | |||
| GE_CHK_STATUS_RET(AddMemcpyAsyncNodes(graph, node, node->GetOpDesc()->HasAttr(ATTR_INSERT_BY_MBATCH)), | |||
| "Merge add memcpy node failed."); | |||
| } | |||
| GELOGD("MergeInputMemcpyPass Leave"); | |||
| return SUCCESS; | |||
| } | |||
| /// | |||
| /// @brief Add MemcpyAsync Op as Merge in_node | |||
| /// @param [in] graph | |||
| /// @param [in] node | |||
| /// @param [in] multi_batch_flag | |||
| /// @return Status | |||
| /// | |||
| Status MergeInputMemcpyPass::AddMemcpyAsyncNodes(const ComputeGraphPtr &graph, const NodePtr &node, | |||
| bool multi_batch_flag) { | |||
| for (const InDataAnchorPtr &in_data_anchor : node->GetAllInDataAnchors()) { | |||
| OutDataAnchorPtr peer_out_anchor = in_data_anchor->GetPeerOutAnchor(); | |||
| GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue); | |||
| NodePtr in_node = peer_out_anchor->GetOwnerNode(); | |||
| const std::string &type = in_node->GetType(); | |||
| // For WhileLoop, no memcpy is needed for Merge. | |||
| GE_IF_BOOL_EXEC((type == ENTER) || (type == REFENTER) || (type == NEXTITERATION) || (type == REFNEXTITERATION), | |||
| continue); | |||
| const std::string &memcpy_name = node->GetName() + "_input_" + std::to_string(in_data_anchor->GetIdx()); | |||
| NodePtr memcpy_node = CreateMemcpyAsyncNode(graph, memcpy_name, peer_out_anchor, multi_batch_flag); | |||
| GE_CHK_BOOL_EXEC(memcpy_node != nullptr, return FAILED, "Create MemcpyAsync node failed."); | |||
| GE_CHK_STATUS(GraphUtils::RemoveEdge(peer_out_anchor, in_data_anchor), "MemcpyAsync node remove edge failed."); | |||
| GE_CHK_STATUS(GraphUtils::AddEdge(peer_out_anchor, memcpy_node->GetInDataAnchor(0)), | |||
| "MemcpyAsync node add edge failed."); | |||
| GE_CHK_STATUS(GraphUtils::AddEdge(memcpy_node->GetOutDataAnchor(0), in_data_anchor), | |||
| "MemcpyAsync node add edge failed."); | |||
| } | |||
| return SUCCESS; | |||
| } | |||
| /// | |||
| /// @brief Add MemcpyAsync Node | |||
| /// @param [in] graph | |||
| /// @param [in] name | |||
| /// @param [in] out_data_anchor | |||
| /// @param [in] multi_batch_flag | |||
| /// @return ge::NodePtr | |||
| /// | |||
| NodePtr MergeInputMemcpyPass::CreateMemcpyAsyncNode(const ComputeGraphPtr &graph, const std::string &name, | |||
| const OutDataAnchorPtr &out_data_anchor, bool multi_batch_flag) { | |||
| OpDescPtr pre_op_desc = out_data_anchor->GetOwnerNode()->GetOpDesc(); | |||
| GE_CHK_BOOL_EXEC(pre_op_desc != nullptr, return nullptr, "OpDesc of pre node is invalid."); | |||
| const std::string &memcpy_type = multi_batch_flag ? MEMCPYADDRASYNC : MEMCPYASYNC; | |||
| const std::string &node_name = name + "_" + memcpy_type; | |||
| GELOGI("Create MemcpyAsync op:%s.", node_name.c_str()); | |||
| OpDescPtr op_desc = MakeShared<OpDesc>(node_name, memcpy_type); | |||
| if (op_desc == nullptr) { | |||
| GELOGE(FAILED, "Create op_desc failed, MemcpyAsync:%s.", node_name.c_str()); | |||
| return nullptr; | |||
| } | |||
| GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx())) == GRAPH_SUCCESS, | |||
| return nullptr, "Create MemcpyAsync op: add input desc failed."); | |||
| GE_CHK_BOOL_EXEC(op_desc->AddOutputDesc(pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx())) == GRAPH_SUCCESS, | |||
| return nullptr, "Create MemcpyAsync op: add output desc failed."); | |||
| return graph->AddNode(op_desc); | |||
| } | |||
| } // namespace ge | |||
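| // Effect of the pass on one Merge data input (illustrative sketch, not part of the file): | |||
| //   before:  pred ──data──► Merge | |||
| //   after :  pred ──data──► <merge_name>_input_<idx>_MemcpyAsync ──data──► Merge | |||
| // MemcpyAddrAsync is used instead when the Merge carries ATTR_INSERT_BY_MBATCH, and inputs | |||
| // fed by Enter/RefEnter/NextIteration/RefNextIteration (while loops) are left untouched. | |||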
| @@ -0,0 +1,49 @@ | |||
| /** | |||
| * Copyright 2019-2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef GE_GRAPH_PASSES_MERGE_ADD_INPUT_MEMCPY_PASS_H_ | |||
| #define GE_GRAPH_PASSES_MERGE_ADD_INPUT_MEMCPY_PASS_H_ | |||
| #include "inc/graph_pass.h" | |||
| namespace ge { | |||
| class MergeInputMemcpyPass : public GraphPass { | |||
| public: | |||
| Status Run(ComputeGraphPtr graph); | |||
| private: | |||
| /// | |||
| /// @brief Add MemcpyAsync Op as Merge in_node | |||
| /// @param [in] graph | |||
| /// @param [in] node | |||
| /// @param [in] multi_batch_flag | |||
| /// @return Status | |||
| /// | |||
| Status AddMemcpyAsyncNodes(const ComputeGraphPtr &graph, const NodePtr &node, bool multi_batch_flag); | |||
| /// | |||
| /// @brief Add MemcpyAsync Node | |||
| /// @param [in] graph | |||
| /// @param [in] name | |||
| /// @param [in] out_data_anchor | |||
| /// @param [in] multi_batch_flag | |||
| /// @return ge::NodePtr | |||
| /// | |||
| NodePtr CreateMemcpyAsyncNode(const ComputeGraphPtr &graph, const std::string &name, | |||
| const OutDataAnchorPtr &out_data_anchor, bool multi_batch_flag); | |||
| }; | |||
| } // namespace ge | |||
| #endif // GE_GRAPH_PASSES_MERGE_ADD_INPUT_MEMCPY_PASS_H_ | |||
| @@ -32,7 +32,7 @@ Status MergeToStreamMergePass::Run(ComputeGraphPtr graph) { | |||
| OpDescPtr merge_op_desc = node->GetOpDesc(); | |||
| GE_CHECK_NOTNULL(merge_op_desc); | |||
| if (merge_op_desc->HasAttr(ATTR_INSERT_BY_MBATCH)) { | |||
| GE_CHK_STATUS_RET(AddMemcpyAsyncNodes(graph, node, true), "Merge add memcpy node failed."); | |||
| GE_CHK_STATUS_RET(AddActiveNodes(graph, node), "Merge add active node failed."); | |||
| GE_CHK_STATUS_RET(SetStreamLabel(node, node->GetName()), "Set stream label failed"); | |||
| } else { | |||
| GE_CHK_STATUS_RET(ReplaceMergeNode(graph, node), "Add StreamMerge node failed."); | |||
| @@ -99,38 +99,26 @@ Status MergeToStreamMergePass::ReplaceMergeNode(const ComputeGraphPtr &graph, co | |||
| } | |||
| } | |||
| return AddMemcpyAsyncNodes(graph, stream_merge, false); | |||
| return AddActiveNodes(graph, stream_merge); | |||
| } | |||
| /// | |||
| /// @brief Add MemcpyAsync Op as StreamMerge in_node | |||
| /// @brief Add StreamActive Op before StreamMerge/Merge | |||
| /// @param [in] graph | |||
| /// @param [in] node | |||
| /// @param [in] multi_batch_flag | |||
| /// @return Status | |||
| /// | |||
| Status MergeToStreamMergePass::AddMemcpyAsyncNodes(const ComputeGraphPtr &graph, const NodePtr &node, | |||
| bool multi_batch_flag) { | |||
| Status MergeToStreamMergePass::AddActiveNodes(const ComputeGraphPtr &graph, const NodePtr &node) { | |||
| GE_CHK_BOOL_EXEC(node != nullptr, return FAILED, "Param of pre node is null."); | |||
| for (const InDataAnchorPtr &in_data_anchor : node->GetAllInDataAnchors()) { | |||
| OutDataAnchorPtr peer_out_anchor = in_data_anchor->GetPeerOutAnchor(); | |||
| GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue); | |||
| NodePtr in_node = peer_out_anchor->GetOwnerNode(); | |||
| const std::string &type = in_node->GetType(); | |||
| // For WhileLoop no need memcpy & active for merge. | |||
| // For WhileLoop, no need to add active nodes here, since they have already been added in NextIterationPass. | |||
| GE_IF_BOOL_EXEC((type == ENTER) || (type == REFENTER) || (type == NEXTITERATION) || (type == REFNEXTITERATION), | |||
| continue); | |||
| const std::string &memcpy_name = node->GetName() + "_input_" + std::to_string(in_data_anchor->GetIdx()); | |||
| NodePtr memcpy_node = CreateMemcpyAsyncNode(graph, memcpy_name, peer_out_anchor, multi_batch_flag); | |||
| GE_CHK_BOOL_EXEC(memcpy_node != nullptr, return FAILED, "Create MemcpyAsync node failed."); | |||
| GE_CHK_STATUS(GraphUtils::RemoveEdge(peer_out_anchor, in_data_anchor), "MemcpyAsync node remove edge failed."); | |||
| GE_CHK_STATUS(GraphUtils::AddEdge(peer_out_anchor, memcpy_node->GetInDataAnchor(0)), | |||
| "MemcpyAsync node add edge failed."); | |||
| GE_CHK_STATUS(GraphUtils::AddEdge(memcpy_node->GetOutDataAnchor(0), in_data_anchor), | |||
| "MemcpyAsync node add edge failed."); | |||
| NodePtr active_node = CreateActiveNode(graph, memcpy_node); | |||
| NodePtr active_node = CreateActiveNode(graph, in_node); | |||
| GE_CHK_BOOL_EXEC(active_node != nullptr, return FAILED, "Create StreamActive node failed."); | |||
| GE_CHK_STATUS(GraphUtils::AddEdge(active_node->GetOutControlAnchor(), node->GetInControlAnchor()), | |||
| "StreamActive add ctrl edge failed."); | |||
| @@ -143,37 +131,6 @@ Status MergeToStreamMergePass::AddMemcpyAsyncNodes(const ComputeGraphPtr &graph, | |||
| return SUCCESS; | |||
| } | |||
| /// | |||
| /// @brief Add MemcpyAsync Node | |||
| /// @param [in] graph | |||
| /// @param [in] name | |||
| /// @param [in] out_data_anchor | |||
| /// @param [in] multi_batch_flag | |||
| /// @return ge::NodePtr | |||
| /// | |||
| NodePtr MergeToStreamMergePass::CreateMemcpyAsyncNode(const ComputeGraphPtr &graph, const std::string &name, | |||
| const OutDataAnchorPtr &out_data_anchor, bool multi_batch_flag) { | |||
| GE_CHK_BOOL_EXEC(out_data_anchor != nullptr, return nullptr, "Param of input node is null."); | |||
| OpDescPtr pre_op_desc = out_data_anchor->GetOwnerNode()->GetOpDesc(); | |||
| GE_CHK_BOOL_EXEC(pre_op_desc != nullptr, return nullptr, "OpDesc of pre node is invalid."); | |||
| const std::string &memcpy_type = multi_batch_flag ? MEMCPYADDRASYNC : MEMCPYASYNC; | |||
| const std::string &node_name = name + "_" + memcpy_type; | |||
| GELOGI("Create MemcpyAsync op:%s.", node_name.c_str()); | |||
| OpDescPtr op_desc = MakeShared<OpDesc>(node_name, memcpy_type); | |||
| if (op_desc == nullptr) { | |||
| GELOGE(FAILED, "Create op_desc failed, MemcpyAsync:%s.", node_name.c_str()); | |||
| return nullptr; | |||
| } | |||
| GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx())) == GRAPH_SUCCESS, | |||
| return nullptr, "Create MemcpyAsync op: add input desc failed."); | |||
| GE_CHK_BOOL_EXEC(op_desc->AddOutputDesc(pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx())) == GRAPH_SUCCESS, | |||
| return nullptr, "Create MemcpyAsync op: add output desc failed."); | |||
| return graph->AddNode(op_desc); | |||
| } | |||
| /// | |||
| /// @brief Create Active Op | |||
| /// @param [in] graph | |||
| @@ -193,7 +150,7 @@ NodePtr MergeToStreamMergePass::CreateActiveNode(const ComputeGraphPtr &graph, c | |||
| GE_CHK_BOOL_EXEC(active_node != nullptr, return nullptr, "Create StreamActive node failed."); | |||
| GE_IF_BOOL_EXEC(GraphUtils::AddEdge(node->GetOutControlAnchor(), active_node->GetInControlAnchor()) != SUCCESS, | |||
| GELOGE(INTERNAL_ERROR, "add edge failed"); | |||
| return nullptr); | |||
| return nullptr); | |||
| GE_IF_BOOL_EXEC(SetSwitchBranchNodeLabel(active_node, node_name) != SUCCESS, | |||
| GELOGE(INTERNAL_ERROR, "set switch branch node label failed"); | |||
| return nullptr); | |||
| @@ -34,24 +34,12 @@ class MergeToStreamMergePass : public GraphPass { | |||
| Status ReplaceMergeNode(const ComputeGraphPtr &graph, const NodePtr &merge_node); | |||
| /// | |||
| /// @brief Add MemcpyAsync Op as StreamMerge in_node | |||
| /// @brief Add StreamActive Op before StreamMerge/Merge | |||
| /// @param [in] graph | |||
| /// @param [in] node | |||
| /// @param [in] multi_batch_flag | |||
| /// @return Status | |||
| /// | |||
| Status AddMemcpyAsyncNodes(const ComputeGraphPtr &graph, const NodePtr &node, bool multi_batch_flag); | |||
| /// | |||
| /// @brief Add MemcpyAsync Node | |||
| /// @param [in] graph | |||
| /// @param [in] name | |||
| /// @param [in] out_data_anchor | |||
| /// @param [in] multi_batch_flag | |||
| /// @return ge::NodePtr | |||
| /// | |||
| NodePtr CreateMemcpyAsyncNode(const ComputeGraphPtr &graph, const std::string &name, | |||
| const OutDataAnchorPtr &out_data_anchor, bool multi_batch_flag); | |||
| Status AddActiveNodes(const ComputeGraphPtr &graph, const NodePtr &node); | |||
| /// | |||
| /// @brief Create Active Op | |||
| @@ -67,19 +67,19 @@ static bool is_dynamic_input = false; | |||
| const char *const kGraphMemoryManagerMallocMaxSize = "8*1024*1024*1024"; | |||
| const char *const kModeSupport = "only support 0(model to framework model), " | |||
| "1(framework model to json), 3(only pre-check), 5(pbtxt to json)"; | |||
| const char *const kModelToJsonSupport = "only support 0(Caffe) 3(TensorFlow)"; | |||
| const char *const kModelToJsonSupport = "only support 0(Caffe) 3(TensorFlow) 5(Onnx)"; | |||
| // limit available mem size 2G | |||
| const long kMinAvailableMem = 2 * 1024 * 1024; | |||
| DEFINE_string(model, "", "The model file."); | |||
| DEFINE_string(output, "", "The output file path&name."); | |||
| DEFINE_int32(framework, -1, "Framework type(0:Caffe; 1:MindSpore; 3:Tensorflow)."); | |||
| DEFINE_int32(framework, -1, "Framework type(0:Caffe; 1:MindSpore; 3:Tensorflow; 5:Onnx)."); | |||
| DEFINE_string(weight, "", "Optional; weight file. Required when framework is Caffe."); | |||
| DEFINE_string(input_shape, "", | |||
| "Optional; shape of input data. Required when framework is caffe " | |||
| "or TensorFLow or MindSpore." | |||
| "or TensorFLow or MindSpore or Onnx. " | |||
| "Format: \"input_name1:n1,c1,h1,w1;input_name2:n2,c2,h2,w2\""); | |||
| DEFINE_bool(h, false, "show this help message"); | |||
| DEFINE_string(cal_conf, "", "Optional; the calibration config file."); | |||
| @@ -225,31 +225,29 @@ class GFlagUtils { | |||
| " --model Model file\n" | |||
| " --weight Weight file. Required when framework is Caffe\n" | |||
| " --om The model file to be converted to json\n" | |||
| " --framework Framework type. 0:Caffe; 1:MindSpore; 3:Tensorflow\n" | |||
| " --framework Framework type. 0:Caffe; 1:MindSpore; 3:Tensorflow; 5:Onnx\n" | |||
| " --input_format Format of input data. E.g.: \"NCHW\"\n" | |||
| " --input_shape Shape of input data. Separate multiple nodes with semicolons (;)." | |||
| " --input_shape Shape of input data. Separate multiple nodes with semicolons (;). " | |||
| "Use double quotation marks (\") to enclose each argument.\n" | |||
| " E.g.: \"input_name1:n1,c1,h1,w1;input_name2:n2,c2,h2,w2\"\n" | |||
| " --dynamic_batch_size Set dynamic batch size. E.g: \"batchsize1,batchsize2,batchsize3\"\n" | |||
| " --dynamic_image_size Set dynamic image size. Separate multiple nodes with semicolons (;)." | |||
| " --dynamic_batch_size Set dynamic batch size. E.g.: \"batchsize1,batchsize2,batchsize3\"\n" | |||
| " --dynamic_image_size Set dynamic image size. Separate multiple nodes with semicolons (;). " | |||
| "Use double quotation marks (\") to enclose each argument.\n" | |||
| " E.g: \"imagesize1_height,imagesize1_width;imagesize2_height,imagesize2_width\"\n" | |||
| " --dynamic_dims Set dynamic dims. Separate multiple nodes with semicolons (;)." | |||
| "Use double quotation marks (\") to enclose each argument. E.g: \"dims1_n1,dims1_n2;dims2_n1,dims2_n2\"\n" | |||
| " E.g.: \"imagesize1_height,imagesize1_width;imagesize2_height,imagesize2_width\"\n" | |||
| " --dynamic_dims Set dynamic dims. Separate multiple nodes with semicolons (;). " | |||
| "Use double quotation marks (\") to enclose each argument.\n" | |||
| " E.g.: \"dims1_n1,dims1_n2;dims2_n1,dims2_n2\"\n" | |||
| " --singleop Single op definition file. atc will generate offline " | |||
| "model(s) for single op if --singleop is set.\n" | |||
| "\n[Output]\n" | |||
| " --output Output file path&name(needn't suffix, will add " | |||
| ".om automatically). \n" | |||
| " --output Output file path&name(needn't suffix, will add .om automatically). \n" | |||
| " If --singleop is set, this arg specifies the directory to " | |||
| "which the single op offline model will be generated\n" | |||
| " --output_type Set net output type. Support FP32, FP16, UINT8." | |||
| " --output_type Set net output type. Support FP32, FP16, UINT8. " | |||
| "E.g.: FP16, indicates that all out nodes are set to FP16.\n" | |||
| " \"node1:0:FP16;node2:1:FP32\", indicates setting the datatype of multiple out nodes.\n" | |||
| " --check_report The pre-checking report file. Default value is: " | |||
| "\"check_result.json\"\n" | |||
| " --json The output json file path&name which is " | |||
| "converted from a model\n" | |||
| " --check_report The pre-checking report file. Default value is: \"check_result.json\"\n" | |||
| " --json The output json file path&name which is converted from a model\n" | |||
| "\n[Target]\n" | |||
| " --soc_version The soc version.\n" | |||
| " --core_type Set core type AiCore or VectorCore. VectorCore: use vector core. " | |||
| @@ -260,23 +258,22 @@ class GFlagUtils { | |||
| " --out_nodes Output nodes designated by users. Separate multiple nodes with semicolons (;)." | |||
| "Use double quotation marks (\") to enclose each argument.\n" | |||
| " E.g.: \"node_name1:0;node_name1:1;node_name2:0\"\n" | |||
| " --input_fp16_nodes Input node datatype is fp16. Separate multiple nodes with semicolons " | |||
| "(;)." | |||
| "Use double quotation marks (\") to enclose each argument." | |||
| " --input_fp16_nodes Input node datatype is fp16. Separate multiple nodes with semicolons (;)." | |||
| "Use double quotation marks (\") to enclose each argument. " | |||
| "E.g.: \"node_name1;node_name2\"\n" | |||
| " --insert_op_conf Config file to insert new op\n" | |||
| " --op_name_map Custom op name mapping file\n" | |||
| " Note: A semicolon(;) cannot be included in each " | |||
| "path, otherwise the resolved path will not match the expected one.\n" | |||
| " --is_input_adjust_hw_layout Intput node datatype is fp16 and format is " | |||
| "NC1HWC0, used with input_fp16_nodes E.g.: \"true,true,false,true\"\n" | |||
| "NC1HWC0, used with input_fp16_nodes. E.g.: \"true,true,false,true\"\n" | |||
| " --is_output_adjust_hw_layout Net output node datatype is fp16 and format is " | |||
| "NC1HWC0, used with out_nodes. E.g.: \"true,true,false,true\"\n" | |||
| "\n[Model Tuning]\n" | |||
| " --disable_reuse_memory The switch of reuse memory. Default value is : 0." | |||
| " --disable_reuse_memory The switch of reuse memory. Default value is : 0. " | |||
| "0 means reuse memory, 1 means do not reuse memory.\n" | |||
| " --fusion_switch_file Set fusion switch file path\n" | |||
| " --enable_scope_fusion_passes validate the non-general scope fusion passes," | |||
| " --enable_scope_fusion_passes validate the non-general scope fusion passes, " | |||
| "multiple names can be set and separated by ','. E.g.: ScopePass1,ScopePass2,...\n" | |||
| " --enable_single_stream Enable single stream. true: enable; false(default): disable\n" | |||
| " --enable_small_channel Set enable small channel. 0(default): disable; 1: enable\n" | |||
| @@ -287,20 +284,21 @@ class GFlagUtils { | |||
| " --precision_mode precision mode, support force_fp16(default), allow_mix_precision, " | |||
| "allow_fp32_to_fp16, must_keep_origin_dtype.\n" | |||
| " --auto_tune_mode Set tune mode. E.g.: \"GA,RL\", support configure multiple, spit by ,\n" | |||
| " --op_select_implmode Set op select implmode. Support high_precision, high_performance." | |||
| " --op_select_implmode Set op select implmode. Support high_precision, high_performance. " | |||
| "default: high_performance\n" | |||
| " --optypelist_for_implmode Appoint which op to select implmode, cooperated with op_select_implmode.\n" | |||
| " Separate multiple nodes with commas (,). Use double quotation marks (\") " | |||
| " to enclose each argument. E.g.: \"node_name1,node_name2\"\n" | |||
| "to enclose each argument. E.g.: \"node_name1,node_name2\"\n" | |||
| " --op_debug_level Debug enable for TBE operator building.\n" | |||
| " 0 (default): Disable debug; 1: Enable TBE pipe_all, " | |||
| "and generate the operator CCE file and Python-CCE mapping file (.json);\n" | |||
| " 2: Enable TBE pipe_all, generate the operator CCE file and Python-CCE mapping file " | |||
| "(.json), and enable the CCE compiler -O0-g.\n" | |||
| " 3: Disable debug, and keep generating kernel file (.o and .json)\n" | |||
| "\n[Debug]\n" | |||
| " --save_original_model Control whether to output original model. E.g.: true: output original model\n" | |||
| " --log Generate log with level. Support debug, info, warning, error, null\n" | |||
| " --dump_mode The switch of dump json with shape, to be used with mode 1." | |||
| " --dump_mode The switch of dump json with shape, to be used with mode 1. " | |||
| "0(default): disable; 1: enable."); | |||
| gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true); | |||
| @@ -511,9 +509,9 @@ class GFlagUtils { | |||
| // No framework information was passed in or the entered framework is illegal | |||
| ErrorManager::GetInstance().ATCReportErrMessage( | |||
| "E10007", {"parameter", "support"}, | |||
| {"framework", "0(Caffe) or 1(MindSpore) or 3(TensorFlow)"}); | |||
| {"framework", "0(Caffe) or 1(MindSpore) or 3(TensorFlow) or 5(Onnx)"}); | |||
| DOMI_LOGE("Input parameter[--framework] is mandatory and it's value must be: " | |||
| "0(Caffe) or 1(MindSpore) or 3(TensorFlow)."); | |||
| "0(Caffe) or 1(MindSpore) or 3(TensorFlow) or 5(Onnx)."); | |||
| return domi::PARAM_INVALID; | |||
| } | |||
| @@ -994,8 +994,9 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertFwkModelToJson(const domi::FrameworkType | |||
| ErrorManager::GetInstance().ATCReportErrMessage( | |||
| "E10001", {"parameter", "value", "reason"}, | |||
| {"--framework", std::to_string(framework), "only support 0(Caffe) 3(TensorFlow)"}); | |||
| GELOGE(PARAM_INVALID, "Input parameter[--framework] is mandatory and it's value must be: 0(Caffe) 3(TensorFlow)."); | |||
| {"--framework", std::to_string(framework), "only support 0(Caffe) 3(TensorFlow) 5(Onnx)"}); | |||
| GELOGE(PARAM_INVALID, "Input parameter[--framework] is mandatory and its value must be: 0(Caffe) 3(TensorFlow) " | |||
| "or 5(Onnx)."); | |||
| return PARAM_INVALID; | |||
| } | |||
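| // Illustrative command line enabled by this change (flag values are assumptions based on the | |||
| // help text above): convert an ONNX framework model to json: | |||
| //   atc --mode=1 --framework=5 --om=model.onnx --json=model.json | |||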
| @@ -263,7 +263,8 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info) { | |||
| } | |||
| Status AiCpuBaseTask::UpdateExtInfo(const std::vector<GeTensorDesc> &input_desc, | |||
| std::vector<GeTensorDesc> &output_desc) { | |||
| std::vector<GeTensorDesc> &output_desc, | |||
| rtStream_t stream) { | |||
| GELOGI("Update ext info begin, unknown_type=%d.", unknown_type_); | |||
| if (num_inputs_ == 0 && num_outputs_ == 0) { | |||
| GELOGI("No input and output, no need update ext info."); | |||
| @@ -283,11 +284,12 @@ Status AiCpuBaseTask::UpdateExtInfo(const std::vector<GeTensorDesc> &input_desc, | |||
| } | |||
| } | |||
| GE_CHK_RT_RET(rtMemcpy(ext_info_addr_dev_, | |||
| aicpu_ext_handle_->GetExtInfoLen(), // check size | |||
| aicpu_ext_handle_->GetExtInfo(), | |||
| aicpu_ext_handle_->GetExtInfoLen(), | |||
| RT_MEMCPY_HOST_TO_DEVICE)); | |||
| GE_CHK_RT_RET(rtMemcpyAsync(ext_info_addr_dev_, | |||
| aicpu_ext_handle_->GetExtInfoLen(), // check size | |||
| aicpu_ext_handle_->GetExtInfo(), | |||
| aicpu_ext_handle_->GetExtInfoLen(), | |||
| RT_MEMCPY_HOST_TO_DEVICE_EX, | |||
| stream)); | |||
| GELOGI("Update ext info end."); | |||
| return SUCCESS; | |||
| @@ -618,7 +620,7 @@ Status AiCpuTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc, | |||
| std::vector<GeTensorDesc> &output_desc, | |||
| std::vector<DataBuffer> &output_buffers, | |||
| rtStream_t stream) { | |||
| GE_CHK_STATUS_RET_NOLOG(UpdateExtInfo(input_desc, output_desc)); | |||
| GE_CHK_STATUS_RET_NOLOG(UpdateExtInfo(input_desc, output_desc, stream)); | |||
| std::vector<void *> inputs; | |||
| std::vector<void *> outputs; | |||
| for (auto &buffer : input_buffers) { | |||
| @@ -629,11 +631,12 @@ Status AiCpuTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc, | |||
| } | |||
| GE_CHK_STATUS_RET_NOLOG(SetIO(inputs, outputs)); | |||
| GE_CHK_STATUS_RET_NOLOG(LaunchKernel(stream)); | |||
| GE_CHK_RT_RET(rtStreamSynchronize(stream)); | |||
| if (unknown_type_ == DEPEND_SHAPE_RANGE) { | |||
| GE_CHK_RT_RET(rtStreamSynchronize(stream)); | |||
| GE_CHK_STATUS_RET_NOLOG(UpdateOutputShape(output_desc)); | |||
| } else if (unknown_type_ == DEPEND_COMPUTE) { | |||
| GE_CHK_RT_RET(rtStreamSynchronize(stream)); | |||
| GE_CHK_STATUS_RET_NOLOG(UpdateShapeAndDataByResultSummary(output_desc, output_buffers, stream)); | |||
| } | |||
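| // Synchronization after this change (illustrative summary): the ext-info copy is queued with | |||
| // rtMemcpyAsync on the task stream and the unconditional rtStreamSynchronize is gone; the | |||
| // stream is synchronized only when host readback is needed, i.e. for DEPEND_SHAPE_RANGE | |||
| // (UpdateOutputShape) and DEPEND_COMPUTE (UpdateShapeAndDataByResultSummary). | |||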
| @@ -689,7 +692,7 @@ Status AiCpuCCTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc, | |||
| "AiCpuCCTask unknown type[%d] is depend compute, it's not supported now.", | |||
| unknown_type_); | |||
| GE_CHK_STATUS_RET_NOLOG(UpdateExtInfo(input_desc, output_desc)); | |||
| GE_CHK_STATUS_RET_NOLOG(UpdateExtInfo(input_desc, output_desc, stream)); | |||
| size_t arg_index = 0; | |||
| auto *task_io_addr = reinterpret_cast<uintptr_t *>(io_addr_); | |||
| @@ -702,9 +705,9 @@ Status AiCpuCCTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc, | |||
| } | |||
| GE_CHK_STATUS_RET_NOLOG(LaunchKernel(stream)); | |||
| GE_CHK_RT_RET(rtStreamSynchronize(stream)); | |||
| if (unknown_type_ == DEPEND_SHAPE_RANGE) { | |||
| GE_CHK_RT_RET(rtStreamSynchronize(stream)); | |||
| GE_CHK_STATUS_RET_NOLOG(UpdateOutputShape(output_desc)); | |||
| } | |||
| @@ -131,7 +131,8 @@ class AiCpuBaseTask : public OpTask { | |||
| Status SetExtInfoAndType(const std::string &kernel_ext_info); | |||
| Status UpdateExtInfo(const std::vector<GeTensorDesc> &input_desc, | |||
| std::vector<GeTensorDesc> &output_desc); | |||
| std::vector<GeTensorDesc> &output_desc, | |||
| rtStream_t stream); | |||
| Status UpdateOutputShape(vector<GeTensorDesc> &output_desc); | |||
| Status UpdateShapeToOutputDesc(const GeShape &shape_new, GeTensorDesc &output_desc); | |||
| @@ -1 +1 @@ | |||
| Subproject commit 384dab951f40b90788cc755e13754ed386ccb9c6 | |||
| Subproject commit 5ced68f6e3eff84a28a4d708e9f38a503b20f24d | |||
| @@ -1 +1 @@ | |||
| Subproject commit f9e30cdc643926d91f7b650b6903a7b3bf6f597b | |||
| Subproject commit 2e47f0ccc3981a3002ac12e3ff6a36fb689968ae | |||