| @@ -11,7 +11,7 @@ AlignTrailingComments: true | |||
| AllowAllParametersOfDeclarationOnNextLine: true | |||
| AllowShortBlocksOnASingleLine: false | |||
| AllowShortCaseLabelsOnASingleLine: false | |||
| AllowShortFunctionsOnASingleLine: All | |||
| AllowShortFunctionsOnASingleLine: Empty | |||
| AllowShortIfStatementsOnASingleLine: true | |||
| AllowShortLoopsOnASingleLine: true | |||
| AlwaysBreakAfterDefinitionReturnType: None | |||
| @@ -50,9 +50,8 @@ CommentPragmas: '^ IWYU pragma:' | |||
| CompactNamespaces: false | |||
| ConstructorInitializerAllOnOneLineOrOnePerLine: true | |||
| ConstructorInitializerIndentWidth: 4 | |||
| ContinuationIndentWidth: 2 | |||
| ContinuationIndentWidth: 4 | |||
| Cpp11BracedListStyle: true | |||
| DerivePointerAlignment: true | |||
| DisableFormat: false | |||
| ExperimentalAutoDetectBinPacking: false | |||
| FixNamespaceComments: true | |||
| @@ -94,7 +93,7 @@ PenaltyBreakString: 1000 | |||
| PenaltyBreakTemplateDeclaration: 10 | |||
| PenaltyExcessCharacter: 1000000 | |||
| PenaltyReturnTypeOnItsOwnLine: 200 | |||
| PointerAlignment: Left | |||
| PointerAlignment: Right | |||
| RawStringFormats: | |||
| - Language: Cpp | |||
| Delimiters: | |||
| @@ -88,13 +88,12 @@ else () | |||
| find_module(hccl libhccl.so ${GE_LIB_PATH}) | |||
| find_module(adump_server libadump_server.a ${GE_LIB_PATH}) | |||
| find_module(runtime libruntime.so ${GE_LIB_PATH}) | |||
| find_module(runtime_compile libruntime_compile.so ${GE_LIB_PATH}) | |||
| find_module(resource libresource.so ${GE_LIB_PATH}) | |||
| find_module(ascend_hal_stub libascend_hal.so ${GE_LIB_PATH}) | |||
| find_module(msprofiler_fwk_ext libmsprofiler_fwk.a ${GE_LIB_PATH}) | |||
| #find_module(ascendcl_static libascendcl.a ${GE_LIB_PATH}) | |||
| else() | |||
| find_module(slog libalog.so ${ASCEND_ATC_DIR}) | |||
| find_module(opt_feature libopt_feature.so ${ASCEND_ATC_DIR}) | |||
| find_module(static_mmpa libmmpa.a ${ASCEND_ATC_DIR}) | |||
| if(PLATFORM STREQUAL "train") | |||
| find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) | |||
| @@ -107,7 +106,6 @@ else () | |||
| elseif(PLATFORM STREQUAL "inference") | |||
| find_module(adump_server libadump_server.a ${ASCEND_ACL_DIR}) | |||
| find_module(runtime libruntime.so ${ASCEND_ACL_DIR}) | |||
| find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR}) | |||
| find_module(msprofiler_ext libmsprofiler.a ${ASCEND_ACL_DIR}) | |||
| if(PRODUCT STREQUAL "flr3") | |||
| elseif(PRODUCT STREQUAL "flr1") | |||
| @@ -118,12 +116,11 @@ else () | |||
| find_module(ascend_hal_stub libascend_hal.so ${ASCEND_DRIVER_DIR}) | |||
| endif() | |||
| elseif(PLATFORM STREQUAL "all") | |||
| find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) | |||
| find_module(runtime libruntime.so ${ASCEND_RUNTIME_DIR}) | |||
| find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) | |||
| find_module(runtime libruntime.so ${ASCEND_ATC_DIR}) | |||
| find_module(msprofiler_fwk_ext libmsprofiler_fwk.a ${ASCEND_RUNTIME_DIR}) | |||
| find_module(ascend_hal_stub libascend_hal.so ${ASCEND_DRIVER_DIR}) | |||
| find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR}) | |||
| find_module(msprofiler_ext libmsprofiler.a ${ASCEND_ACL_DIR}) | |||
| find_module(ascend_hal_stub libascend_hal.so ${ASCEND_ATC_DIR}/stub) | |||
| find_module(msprofiler_ext libmsprofiler.a ${ASCEND_ACL_DIR}) | |||
| else() | |||
| message(STATUS "PLATFORM param is invalid, expected train, inference or all.") | |||
| endif() | |||
| @@ -34,18 +34,6 @@ | |||
| During training/inference the steps above run automatically; through these graph operations, GE converts the graph delivered by the front end into a form that can run efficiently on Ascend AI processors. | |||
| <!-- TOC --> | |||
| - [Installation](#安装说明) | |||
| - [Installing GE](#安装ge) | |||
| - [Building from Source](#源码安装) | |||
| - [Community](#社区) | |||
| - [Contributing](#贡献) | |||
| - [Release Notes](#release-notes) | |||
| - [License](#license) | |||
| <!-- /TOC --> | |||
| # Installation | |||
| ## Installing GE | |||
| @@ -54,45 +42,8 @@ GE is embedded in the MindSpore package; once MindSpore is installed, GE ships as three dynamic | |||
| ## 源码安装 | |||
| GE can also be built from source. Before building, make sure you have an Ascend 910 AI processor environment and that the system meets the following requirements: | |||
| - GCC >= 7.3.0 | |||
| - CMake >= 3.14.0 | |||
| - Autoconf >= 2.64 | |||
| - Libtool >= 2.4.6 | |||
| - Automake >= 1.15.1 | |||
| The build produces several shared libraries, which are linked into MindSpore for execution and cannot run standalone. | |||
| 1. Download the GE source code. | |||
| The GE source code is hosted on the Gitee platform and can be downloaded from there. | |||
| ``` | |||
| git clone https://gitee.com/mindspore/graphengine.git | |||
| cd graphengine | |||
| ``` | |||
| 2. Run the following command from the GE root directory to build. | |||
| ``` | |||
| bash build.sh | |||
| ``` | |||
| > - Before starting the build, make sure the relevant environment variables are set correctly. | |||
| > - The `build.sh` script performs `git clone` operations, so make sure your network connection is working and git is configured correctly. | |||
| > - By default `build.sh` builds with 8 threads; on machines with limited resources the build may fail. The thread count can be set with `-j{thread count}`, e.g. `bash build.sh -j4`. | |||
| 3. After the build completes, the corresponding shared library files are generated in the output folder. | |||
| For more help on the build options, run: | |||
| ``` | |||
| bash build.sh -h | |||
| ``` | |||
| To clear previous build artifacts, run: | |||
| ``` | |||
| rm -rf build/ output/ | |||
| bash build.sh | |||
| ``` | |||
| GE can also be built from source; see the following link for instructions: | |||
| [Personal development toolchain](https://gitee.com/mindspore/graphengine/blob/master/scripts/readme.md) | |||
| ## Community | |||
| @@ -144,7 +144,6 @@ build_graphengine() | |||
| CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_GE_UT=ON" | |||
| fi | |||
| if [[ "X$ENABLE_GE_ST" = "Xon" ]]; then | |||
| CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_GE_ST=ON" | |||
| fi | |||
| @@ -176,7 +175,7 @@ build_graphengine() | |||
| TARGET="ge_compiler atc_atc.bin ge_executor_shared ${TARGET}" | |||
| elif [ "X$ENABLE_GE_ST" = "Xon" ] | |||
| then | |||
| TARGET="ge_graph_dsl_test graph_engine_test" | |||
| TARGET="ge_graph_dsl_test ge_running_env_test graph_engine_test" | |||
| elif [ "X$ENABLE_GE_UT" = "Xon" ] | |||
| then | |||
| TARGET="ut_libgraph ut_libge_multiparts_utest ut_libge_others_utest ut_libge_kernel_utest ut_libge_distinct_load_utest" | |||
| @@ -244,13 +243,13 @@ if [[ "X$ENABLE_GE_ST" = "Xon" ]]; then | |||
| mkdir -p ${OUTPUT_PATH}/plugin/opskernel | |||
| cp ${BUILD_PATH}/tests/framework/libnnengine.so ${OUTPUT_PATH}/plugin/nnengine | |||
| cp ${BUILD_PATH}/engine_conf.json ${OUTPUT_PATH}/plugin/nnengine/ge_config | |||
| cp ${BUILD_PATH}/tests/framework/libhost_cpu_engine.so ${OUTPUT_PATH}/plugin/opskernel | |||
| cp ${BUILD_PATH}/tests/framework/libge_local_engine.so ${OUTPUT_PATH}/plugin/opskernel | |||
| cp ${BUILD_PATH}/tests/framework/stub_engine/libfe.so ${OUTPUT_PATH}/plugin/opskernel | |||
| #prepare st execution bin | |||
| cp ${BUILD_PATH}/tests/st/testcase/graph_engine_test ${OUTPUT_PATH} | |||
| cp ${BUILD_PATH}/tests/framework/ge_running_env/tests/ge_running_env_test ${OUTPUT_PATH} | |||
| cp ${BUILD_PATH}/tests/framework/ge_graph_dsl/tests/ge_graph_dsl_test ${OUTPUT_PATH} | |||
| #execute st testcase | |||
| RUN_TEST_CASE=${OUTPUT_PATH}/ge_running_env_test && ${RUN_TEST_CASE} | |||
| RUN_TEST_CASE=${OUTPUT_PATH}/graph_engine_test && ${RUN_TEST_CASE} | |||
| RUN_TEST_CASE=${OUTPUT_PATH}/ge_graph_dsl_test && ${RUN_TEST_CASE} | |||
| if [[ "$?" -ne 0 ]]; then | |||
| @@ -355,13 +354,13 @@ generate_package() | |||
| if [ "x${PLATFORM}" = "xtrain" ] | |||
| then | |||
| tar -cf graphengine_lib.tar fwkacllib | |||
| tar -zcf graphengine_lib.tar fwkacllib | |||
| elif [ "x${PLATFORM}" = "xinference" ] | |||
| then | |||
| tar -cf graphengine_lib.tar acllib atc | |||
| tar -zcf graphengine_lib.tar acllib atc | |||
| elif [ "x${PLATFORM}" = "xall" ] | |||
| then | |||
| tar -cf graphengine_lib.tar fwkacllib acllib atc | |||
| tar -zcf graphengine_lib.tar fwkacllib acllib atc | |||
| fi | |||
| } | |||
| @@ -371,6 +370,6 @@ elif [ "X$MINDSPORE_MODE" = "Xon" ] | |||
| then | |||
| cd "${OUTPUT_PATH}" | |||
| find ./ -name graphengine_lib.tar -exec rm {} \; | |||
| tar -cf graphengine_lib.tar lib | |||
| tar -zcf graphengine_lib.tar lib | |||
| fi | |||
| echo "---------------- GraphEngine package archive generated ----------------" | |||
| @@ -10,12 +10,17 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR | |||
| message(STATUS "No install prefix selected, default to ${CMAKE_INSTALL_PREFIX}.") | |||
| endif() | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/gflags/repository/archive/v2.2.2.tar.gz") | |||
| set(MD5 "") | |||
| if (GE_PB_PKG) | |||
| set(REQ_URL "${GE_PB_PKG}/libs/gflags/v2.2.2.tar.gz") | |||
| set(MD5 "1a865b93bacfa963201af3f75b7bd64c") | |||
| else() | |||
| set(REQ_URL "https://github.com/gflags/gflags/archive/v2.2.2.tar.gz") | |||
| set(MD5 "") | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/gflags/repository/archive/v2.2.2.tar.gz") | |||
| set(MD5 "") | |||
| else() | |||
| set(REQ_URL "https://github.com/gflags/gflags/archive/v2.2.2.tar.gz") | |||
| set(MD5 "1a865b93bacfa963201af3f75b7bd64c") | |||
| endif () | |||
| endif () | |||
| set (gflags_CXXFLAGS "-D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=ascend_private") | |||
| @@ -11,14 +11,14 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR | |||
| message(STATUS "No install prefix selected, default to ${CMAKE_INSTALL_PREFIX}.") | |||
| endif() | |||
| if (GE_PB_PKG) | |||
| set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz") | |||
| set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.13.0.tar.gz") | |||
| else() | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz") | |||
| set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236") | |||
| set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz") | |||
| set(MD5 "f4489cb88922ad9c58cbe3308d59cee5") | |||
| else() | |||
| set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz") | |||
| set(MD5 "3d9e32700639618a4d2d342c99d4507a") | |||
| set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz") | |||
| set(MD5 "1a6274bc4a65b55a6fa70e264d796490") | |||
| endif () | |||
| endif() | |||
| @@ -58,7 +58,7 @@ target_include_directories(ascend_protobuf INTERFACE ${PROTOBUF_SHARED_PKG_DIR}/ | |||
| set(INSTALL_BASE_DIR "") | |||
| set(INSTALL_LIBRARY_DIR lib) | |||
| install(FILES ${PROTOBUF_SHARED_PKG_DIR}/${CMAKE_INSTALL_LIBDIR}/ascend_protobuf.so.3.8.0.0 OPTIONAL | |||
| install(FILES ${PROTOBUF_SHARED_PKG_DIR}/${CMAKE_INSTALL_LIBDIR}/ascend_protobuf.so.3.13.0.0 OPTIONAL | |||
| DESTINATION ${INSTALL_LIBRARY_DIR}) | |||
| install(FILES ${PROTOBUF_SHARED_PKG_DIR}/${CMAKE_INSTALL_LIBDIR}/ascend_protobuf.so OPTIONAL | |||
| DESTINATION ${INSTALL_LIBRARY_DIR}) | |||
| @@ -13,14 +13,14 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR | |||
| endif() | |||
| if(GE_PB_PKG) | |||
| set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz") | |||
| set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.13.0.tar.gz") | |||
| else() | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz") | |||
| set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236") | |||
| set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz") | |||
| set(MD5 "f4489cb88922ad9c58cbe3308d59cee5") | |||
| else() | |||
| set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz") | |||
| set(MD5 "3d9e32700639618a4d2d342c99d4507a") | |||
| set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz") | |||
| set(MD5 "1a6274bc4a65b55a6fa70e264d796490") | |||
| endif () | |||
| endif() | |||
| @@ -29,8 +29,6 @@ set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") | |||
| set(PROTOBUF_STATIC_PKG_DIR ${CMAKE_INSTALL_PREFIX}/protobuf_static) | |||
| ExternalProject_Add(protobuf_static_build | |||
| URL ${REQ_URL} | |||
| #URL /home/txd/workspace/linux_cmake/pkg/protobuf-3.8.0.tar.gz | |||
| #SOURCE_DIR ${METADEF_DIR}/../../third_party/protobuf/src/protobuf-3.8.0 | |||
| TLS_VERIFY OFF | |||
| CONFIGURE_COMMAND ${CMAKE_COMMAND} | |||
| -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} | |||
| @@ -13,14 +13,14 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR | |||
| endif() | |||
| if(GE_PB_PKG) | |||
| set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz") | |||
| set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.13.0.tar.gz") | |||
| else() | |||
| if (ENABLE_GITEE) | |||
| set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz") | |||
| set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236") | |||
| set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz") | |||
| set(MD5 "f4489cb88922ad9c58cbe3308d59cee5") | |||
| else() | |||
| set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz") | |||
| set(MD5 "3d9e32700639618a4d2d342c99d4507a") | |||
| set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz") | |||
| set(MD5 "1a6274bc4a65b55a6fa70e264d796490") | |||
| endif () | |||
| endif() | |||
| @@ -28,8 +28,6 @@ set(protobuf_CXXFLAGS "-Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fst | |||
| set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") | |||
| ExternalProject_Add(protoc_build | |||
| URL ${REQ_URL} | |||
| #URL /home/txd/workspace/linux_cmake/pkg/protobuf-3.8.0.tar.gz | |||
| #SOURCE_DIR ${GE_CODE_DIR}/../third_party/protobuf/src/protobuf-3.8.0 | |||
| TLS_VERIFY OFF | |||
| CONFIGURE_COMMAND ${CMAKE_COMMAND} -Dprotobuf_WITH_ZLIB=OFF -Dprotobuf_BUILD_TESTS=OFF -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS=${protobuf_CXXFLAGS} -DCMAKE_CXX_LDFLAGS=${protobuf_LDFLAGS} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}/protoc <SOURCE_DIR>/cmake | |||
| BUILD_COMMAND $(MAKE) | |||
| @@ -14,7 +14,7 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "analyzer.h" | |||
| #include "analyzer/analyzer.h" | |||
| #include <cstdlib> | |||
| #include <cstdio> | |||
| @@ -14,10 +14,10 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "ge/ge_api.h" | |||
| #include "external/ge/ge_api.h" | |||
| #include <iostream> | |||
| #include <malloc.h> | |||
| #include "common/debug/log.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "common/ge/datatype_util.h" | |||
| #include "proto/ge_api.pb.h" | |||
| @@ -29,7 +29,7 @@ | |||
| #include "graph/opsproto_manager.h" | |||
| #include "graph/utils/type_utils.h" | |||
| #include "graph/manager/util/rt_context_util.h" | |||
| #include "graph/common/ge_call_wrapper.h" | |||
| #include "common/ge_call_wrapper.h" | |||
| #include "register/op_registry.h" | |||
| #include "common/ge/tbe_plugin_manager.h" | |||
| #include "common/util/error_manager/error_manager.h" | |||
| @@ -47,6 +47,7 @@ const int32_t kMaxStrLen = 128; | |||
| static bool g_ge_initialized = false; | |||
| static std::mutex g_ge_release_mutex; // GEFinalize and ~Session use | |||
| static std::shared_ptr<ge::SessionManager> g_session_manager; | |||
| namespace ge { | |||
| void GetOpsProtoPath(std::string &opsproto_path) { | |||
| @@ -70,8 +71,7 @@ Status CheckOptionsValid(const std::map<string, string> &options) { | |||
| auto job_id_iter = options.find(OPTION_EXEC_JOB_ID); | |||
| if (job_id_iter != options.end()) { | |||
| if (job_id_iter->second.length() > kMaxStrLen) { | |||
| GELOGE(PARAM_INVALID, "[Check][JobId]Failed," | |||
| "the job_id [%s] string length: %zu > max string length: %d", | |||
| GELOGE(PARAM_INVALID, "[Check][JobId]Failed, the job_id [%s] string length: %zu > max string length: %d", | |||
| job_id_iter->second.c_str(), job_id_iter->second.length(), kMaxStrLen); | |||
| REPORT_INPUT_ERROR("E10051", std::vector<std::string>({"id", "length"}), | |||
| std::vector<std::string>({job_id_iter->second, | |||
| @@ -95,8 +95,7 @@ Status GEInitializeImpl(const std::map<string, string> &options) { | |||
| std::string path_base = ge::GELib::GetPath(); | |||
| auto ret = ErrorManager::GetInstance().Init(path_base); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(GE_CLI_INIT_FAILED, | |||
| "[Init][PathBase]Init failed when pass param path_base:%s", path_base.c_str()); | |||
| GELOGE(GE_CLI_INIT_FAILED, "[Init][PathBase]Init failed when pass param path_base:%s", path_base.c_str()); | |||
| REPORT_CALL_ERROR("E19999", "Init failed when pass param path_base:%s", path_base.c_str()); | |||
| return ret; | |||
| } | |||
| @@ -117,11 +116,9 @@ Status GEInitializeImpl(const std::map<string, string> &options) { | |||
| bool is_proto_init = manager->Initialize(option_tmp); | |||
| GE_TIMESTAMP_END(GEInitialize, "GEInitialize::ManagerInitialize"); | |||
| if (!is_proto_init) { | |||
| GELOGE(GE_CLI_INIT_FAILED, | |||
| "[Init][OpsProtoPath]Loading OpsProto lib plugin failed, OpsProtoPath:%s invalid.", | |||
| GELOGE(GE_CLI_INIT_FAILED, "[Init][OpsProtoPath]Loading OpsProto lib plugin failed, OpsProtoPath:%s invalid.", | |||
| opsproto_path.c_str()); | |||
| REPORT_CALL_ERROR("E19999", "Loading OpsProto lib plugin failed, OpsProtoPath:%s invalid", | |||
| opsproto_path.c_str()); | |||
| REPORT_CALL_ERROR("E19999", "Loading OpsProto lib plugin failed, OpsProtoPath:%s invalid", opsproto_path.c_str()); | |||
| return FAILED; | |||
| } | |||
| @@ -148,6 +145,22 @@ Status GEInitializeImpl(const std::map<string, string> &options) { | |||
| return FAILED; | |||
| } | |||
| ErrorManager::GetInstance().SetStage(error_message::kInitialize, error_message::kOther); | |||
| GELOGI("sessionManager initial."); | |||
| GE_TIMESTAMP_START(SessionManagerInitialize); | |||
| g_session_manager = MakeShared<ge::SessionManager>(); | |||
| if (g_session_manager == nullptr) { | |||
| GELOGE(GE_CLI_INIT_FAILED, "[Init][Create]SessionManager failed"); | |||
| return FAILED; | |||
| } | |||
| ret = g_session_manager->Initialize(options); | |||
| GE_TIMESTAMP_END(SessionManagerInitialize, "InnerInitialize::SessionManagerInitialize"); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, "[Init][SessionManager] GE session manager initial failed."); | |||
| REPORT_CALL_ERROR("E19999", "SessionManager initialize failed."); | |||
| return ret; | |||
| } | |||
| // 7.check return status, return | |||
| if (!g_ge_initialized) { | |||
| // Initialize success, first time calling initialize | |||
| @@ -173,8 +186,7 @@ Status GEInitialize(const std::map<AscendString, AscendString> &options) { | |||
| for (auto &option : options) { | |||
| if (option.first.GetString() == nullptr || option.second.GetString() == nullptr) { | |||
| GELOGE(FAILED, "[Check][Param]Options invalid, first or second option is nullptr."); | |||
| REPORT_INNER_ERROR("E19999", "Check parameter's options invalid," | |||
| "the first or second option is nullptr."); | |||
| REPORT_INNER_ERROR("E19999", "Check parameter's options invalid, the first or second option is nullptr."); | |||
| return FAILED; | |||
| } | |||
| std::string key = option.first.GetString(); | |||
| @@ -217,6 +229,12 @@ Status GEFinalize() { | |||
| ret = middle_ret; | |||
| } | |||
| } | |||
| GELOGI("SessionManager finalization."); | |||
| if (g_session_manager != nullptr) { | |||
| (void)g_session_manager->Finalize(); // always success. | |||
| } | |||
| middle_ret = TBEPluginManager::Instance().Finalize(); | |||
| if (middle_ret != SUCCESS) { | |||
| ret = middle_ret; | |||
| @@ -251,28 +269,18 @@ std::string GEGetWarningMsg() { | |||
| Session::Session(const std::map<string, string> &options) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kInitialize, error_message::kOther); | |||
| GELOGT(TRACE_INIT, "Start to construct session."); | |||
| ErrorManager::GetInstance().GenWorkStreamIdDefault(); | |||
| // check init status | |||
| sessionId_ = 0; | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "Creating session failed because lack GEInitialize call before."); | |||
| return; | |||
| } | |||
| // call Initialize | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Construct][Session]Failed, GELib instance is nullptr or it is not InitFlag"); | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Creating session"); | |||
| uint64_t session_id = 0; | |||
| Status ret = instance_ptr->SessionManagerObj().CreateSession(options, session_id); | |||
| Status ret = g_session_manager->CreateSession(options, session_id); | |||
| GELOGT(TRACE_RUNNING, "Session id is %lu", session_id); | |||
| // check return status, return, update session id if success | |||
| @@ -288,32 +296,21 @@ Session::Session(const std::map<string, string> &options) { | |||
| Session::Session(const std::map<AscendString, AscendString> &options) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kInitialize, error_message::kOther); | |||
| GELOGT(TRACE_INIT, "Session Constructor start"); | |||
| ErrorManager::GetInstance().GenWorkStreamIdDefault(); | |||
| // check init status | |||
| sessionId_ = 0; | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "Creating session failed because lack GEInitialize call before."); | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return; | |||
| } | |||
| // call Initialize | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Construct][Session]Failed, the GELib instance is nullptr or is not InitFlag"); | |||
| return; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Creating session"); | |||
| std::map<std::string, std::string> str_options; | |||
| for (auto &option : options) { | |||
| if (option.first.GetString() == nullptr || option.second.GetString() == nullptr) { | |||
| GELOGE(FAILED, "[Construct][Session]Failed, the first or second option is nullptr."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session's options invalid," | |||
| "the first or second option is nullptr."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session's options invalid, the first or second option is nullptr."); | |||
| return; | |||
| } | |||
| std::string key = option.first.GetString(); | |||
| @@ -321,7 +318,7 @@ Session::Session(const std::map<AscendString, AscendString> &options) { | |||
| str_options[key] = val; | |||
| } | |||
| uint64_t session_id = 0; | |||
| Status ret = instance_ptr->SessionManagerObj().CreateSession(str_options, session_id); | |||
| Status ret = g_session_manager->CreateSession(str_options, session_id); | |||
| GELOGT(TRACE_RUNNING, "Session id is %lu", session_id); | |||
| // check return status, return, update session id if success | |||
| @@ -350,19 +347,12 @@ Session::~Session() { | |||
| try { | |||
| uint64_t session_id = sessionId_; | |||
| // call DestroySession | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGW("GE is not yet initialized or is finalized."); | |||
| return; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Session id is %lu", session_id); | |||
| GELOGT(TRACE_RUNNING, "Destroying session"); | |||
| ret = instance_ptr->SessionManagerObj().DestroySession(session_id); | |||
| ret = g_session_manager->DestroySession(session_id); | |||
| } catch (google::protobuf::FatalException &e) { | |||
| GELOGE(GE_CLI_SESS_DESTROY_FAILED, "[Destruct][Session]Failed " | |||
| "because get fatalException."); | |||
| GELOGE(GE_CLI_SESS_DESTROY_FAILED, "[Destruct][Session]Failed because get fatalException."); | |||
| REPORT_CALL_ERROR("E19999", "Destruct session failed, get fatal exception"); | |||
| } | |||
| @@ -377,9 +367,7 @@ Session::~Session() { | |||
| // Add Graph | |||
| Status Session::AddGraph(uint32_t graph_id, const Graph &graph) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| std::map<std::string, std::string> options; | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| return AddGraph(graph_id, graph, options); | |||
| } | |||
| @@ -388,20 +376,16 @@ Status Session::AddGraph(uint32_t graph_id, const Graph &graph, const std::map<s | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| GELOGT(TRACE_INIT, "Start to add graph in Session. graph_id: %u, session_id: %lu.", graph_id, sessionId_); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Add][Graph]Failed because GELib instance is nullptr or it is not InitFlag."); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "AddGraph Failed, GELib instance is nullptr or it is not InitFlag."); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| GELOGD("Adding graph to session"); | |||
| Status ret = instance_ptr->SessionManagerObj().AddGraph(sessionId_, graph_id, graph, options); | |||
| Status ret = g_session_manager->AddGraph(sessionId_, graph_id, graph, options); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, | |||
| "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", | |||
| ret, sessionId_, graph_id); | |||
| GELOGE(ret, "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); | |||
| return FAILED; | |||
| } | |||
| GELOGD("AddGraph finished in Session."); | |||
| @@ -409,37 +393,31 @@ Status Session::AddGraph(uint32_t graph_id, const Graph &graph, const std::map<s | |||
| } | |||
| //Add Graph | |||
| Status Session::AddGraph(uint32_t graph_id, const Graph &graph, | |||
| const std::map<AscendString, AscendString> &options) { | |||
| Status Session::AddGraph(uint32_t graph_id, const Graph &graph, const std::map<AscendString, AscendString> &options) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| GELOGT(TRACE_INIT, "Start to add graph in Session. graph_id: %u, session_id: %lu.", graph_id, sessionId_); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Add][Graph]Failed, the GELib instance is nullptr or is not InitFlag."); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "AddGraph Failed, GELib instance is nullptr or it is not InitFlag."); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| GELOGD("Adding graph to session"); | |||
| std::map<std::string, std::string> str_options; | |||
| for (auto &option : options) { | |||
| if (option.first.GetString() == nullptr || option.second.GetString() == nullptr) { | |||
| GELOGE(FAILED, "[Add][Graph]Failed, the first or second option is nullptr."); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "Add Graph Failed, the first or second option is nullptr."); | |||
| REPORT_INNER_ERROR("E19999", "Add Graph Failed, the first or second option is nullptr."); | |||
| return FAILED; | |||
| } | |||
| std::string key = option.first.GetString(); | |||
| std::string val = option.second.GetString(); | |||
| str_options[key] = val; | |||
| } | |||
| Status ret = instance_ptr->SessionManagerObj().AddGraph(sessionId_, graph_id, graph, str_options); | |||
| Status ret = g_session_manager->AddGraph(sessionId_, graph_id, graph, str_options); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, | |||
| "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", | |||
| ret, sessionId_, graph_id); | |||
| GELOGE(ret, "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); | |||
| return FAILED; | |||
| } | |||
| GELOGD("AddGraph finished in Session."); | |||
| @@ -447,8 +425,6 @@ Status Session::AddGraph(uint32_t graph_id, const Graph &graph, | |||
| } | |||
| Status Session::AddGraphWithCopy(uint32_t graph_id, const Graph &graph) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| std::map<AscendString, AscendString> options; | |||
| return AddGraphWithCopy(graph_id, graph, options); | |||
| } | |||
| @@ -459,24 +435,20 @@ Status Session::AddGraphWithCopy(uint32_t graph_id, const Graph &graph, | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| GELOGT(TRACE_INIT, "Start to add graph in Session. graph_id: %u, session_id: %lu.", graph_id, sessionId_); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Add][Graph]Failed, the GELib instance is nullptr or is not InitFlag."); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "AddGraph Failed, GELib instance is nullptr or is not InitFlag."); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| std::map<std::string, std::string> str_options; | |||
| for (auto it = options.begin(); it != options.end(); ++it) { | |||
| str_options.insert({it->first.GetString(), it->second.GetString()}); | |||
| } | |||
| GELOGD("Adding graph to session"); | |||
| Status ret = instance_ptr->SessionManagerObj().AddGraphWithCopy(sessionId_, graph_id, graph, str_options); | |||
| Status ret = g_session_manager->AddGraphWithCopy(sessionId_, graph_id, graph, str_options); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, | |||
| "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", | |||
| ret, sessionId_, graph_id); | |||
| GELOGE(ret, "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); | |||
| return FAILED; | |||
| } | |||
| GELOGD("AddGraph finished in Session."); | |||
| @@ -487,29 +459,21 @@ Status Session::AddGraphWithCopy(uint32_t graph_id, const Graph &graph, | |||
| Status Session::RemoveGraph(uint32_t graph_id) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| GELOGT(TRACE_INIT, "Session RemoveGraph start"); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| // call RemoveGraph | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (!instance_ptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Remove][Graph]Failed, GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "RemoveGraph Failed, GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Removing Graph from session"); | |||
| Status ret = instance_ptr->SessionManagerObj().RemoveGraph(sessionId_, graph_id); | |||
| Status ret = g_session_manager->RemoveGraph(sessionId_, graph_id); | |||
| // check return status, return | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, | |||
| "[Remove][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", | |||
| ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "Remove graph failed, error code:%u, " | |||
| "session_id:%lu, graph_id:%u", ret, sessionId_, graph_id); | |||
| GELOGE(ret, "[Remove][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "Remove graph failed, error code:%u, session_id:%lu, graph_id:%u", | |||
| ret, sessionId_, graph_id); | |||
| return FAILED; | |||
| } | |||
| GELOGT(TRACE_STOP, "Session RemoveGraph finished"); | |||
| @@ -568,29 +532,21 @@ void PrintOutputResult(std::vector<Tensor> &outputs) { | |||
| Status Session::RunGraph(uint32_t graph_id, const std::vector<Tensor> &inputs, std::vector<Tensor> &outputs) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| GELOGT(TRACE_INIT, "Session RunGraph start"); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| std::vector<Tensor> graph_inputs = inputs; | |||
| // call RunGraph | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Run][Graph]Failed, GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "RunGraph Failed, GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| // call RunGraph | |||
| GELOGT(TRACE_RUNNING, "Running Graph"); | |||
| Status ret = instance_ptr->SessionManagerObj().RunGraph(sessionId_, graph_id, graph_inputs, outputs); | |||
| Status ret = g_session_manager->RunGraph(sessionId_, graph_id, inputs, outputs); | |||
| // check return status | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, | |||
| "[Run][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", | |||
| ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "Remove graph failed, error code:%u, " | |||
| "session_id:%lu, graph_id:%u", ret, sessionId_, graph_id); | |||
| GELOGE(ret, "[Run][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "Remove graph failed, error code:%u, session_id:%lu, graph_id:%u", | |||
| ret, sessionId_, graph_id); | |||
| return FAILED; | |||
| } | |||
| @@ -609,30 +565,15 @@ Status Session::RunGraphWithStreamAsync(uint32_t graph_id, void *stream, const s | |||
| std::vector<Tensor> &outputs) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| GELOGT(TRACE_INIT, "Start to run graph with stream async."); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Run][Graph]Run graph with stream async failed, the GELib instance is nullptr," | |||
| "session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "Run graph with stream async failed, the GELib instance is nullptr" | |||
| "session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream); | |||
| return FAILED; | |||
| } | |||
| if (!instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Run][Graph]Run graph with stream asyn failed, the GELib instance is not init," | |||
| "session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "Run graph with stream asyn failed, the GELib instance is not init," | |||
| "session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Run Graph Run graph with stream asyn."); | |||
| Status ret = instance_ptr->SessionManagerObj().RunGraphWithStreamAsync(sessionId_, graph_id, stream, inputs, | |||
| outputs); | |||
| Status ret = g_session_manager->RunGraphWithStreamAsync(sessionId_, graph_id, stream, inputs, outputs); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, "[Run][Graph]Run graph with stream asyn Failed," | |||
| "error code = %u, session id = %lu, graph id = %u, stream = %p.", ret, sessionId_, graph_id, stream); | |||
| @@ -648,40 +589,46 @@ Status Session::RunGraphWithStreamAsync(uint32_t graph_id, void *stream, const s | |||
| // Register Call Back | |||
| Status Session::RegisterCallBackFunc(const std::string &key, const pCallBackFunc &callback) { | |||
| ErrorManager::GetInstance().GenWorkStreamIdDefault(); | |||
| return ge::GELib::GetInstance()->SessionManagerObj().RegisterCallBackFunc(sessionId_, key, callback); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| return g_session_manager->RegisterCallBackFunc(sessionId_, key, callback); | |||
| } | |||
| Status Session::RegisterCallBackFunc(const char *key, const session::pCallBackFunc &callback) { | |||
| ErrorManager::GetInstance().GenWorkStreamIdDefault(); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| std::string str_key; | |||
| if (key != nullptr) { | |||
| str_key = key; | |||
| } | |||
| return ge::GELib::GetInstance()->SessionManagerObj().RegisterCallBackFunc(sessionId_, str_key, callback); | |||
| return g_session_manager->RegisterCallBackFunc(sessionId_, str_key, callback); | |||
| } | |||
| // Build Graph | |||
| Status Session::BuildGraph(uint32_t graph_id, const std::vector<InputTensorInfo> &inputs) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Build][Graph]Failed, the GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "Build graph failed, the GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Building Graph"); | |||
| Status ret = instance_ptr->SessionManagerObj().BuildGraph(sessionId_, graph_id, inputs); | |||
| Status ret = g_session_manager->BuildGraph(sessionId_, graph_id, inputs); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, | |||
| "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", | |||
| ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "Build graph failed , error code:%u, " | |||
| "session_id:%lu, graph_id:%u", ret, sessionId_, graph_id); | |||
| GELOGE(ret, "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "Build graph failed , error code:%u, session_id:%lu, graph_id:%u", | |||
| ret, sessionId_, graph_id); | |||
| return FAILED; | |||
| } | |||
| return SUCCESS; | |||
| @@ -691,24 +638,18 @@ Status Session::BuildGraph(uint32_t graph_id, const std::vector<InputTensorInfo> | |||
| Status Session::BuildGraph(uint32_t graph_id, const std::vector<ge::Tensor> &inputs) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Build][Graph]Failed, the GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "Build graph failed, the GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Building Graph"); | |||
| Status ret = instance_ptr->SessionManagerObj().BuildGraph(sessionId_, graph_id, inputs); | |||
| Status ret = g_session_manager->BuildGraph(sessionId_, graph_id, inputs); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, | |||
| "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", | |||
| ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "Build graph failed , error code:%u, " | |||
| "session_id:%lu, graph_id:%u", ret, sessionId_, graph_id); | |||
| GELOGE(ret, "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "Build graph failed , error code:%u, session_id:%lu, graph_id:%u", | |||
| ret, sessionId_, graph_id); | |||
| return FAILED; | |||
| } | |||
| return SUCCESS; | |||
| @@ -719,26 +660,22 @@ Status Session::RunGraphAsync(uint32_t graph_id, const std::vector<ge::Tensor> & | |||
| RunAsyncCallback callback) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); | |||
| ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); | |||
| std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Run][Graph]RunGraphAsyncFailed, the GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "RunGraphAsync Failed, the GELib instance is nullptr or is not InitFlag, " | |||
| "session_id %lu, graph_id %u", sessionId_, graph_id); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Run Graph Asynchronously"); | |||
| GELOGW( | |||
| "The callback function will not be checked. Please ensure that the implementation of the function is trusted."); | |||
| Status ret = ge::GELib::GetInstance()->SessionManagerObj().RunGraphAsync(sessionId_, graph_id, inputs, callback); | |||
| Status ret = g_session_manager->RunGraphAsync(sessionId_, graph_id, inputs, callback); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, "[Run][Graph]RunGraphAsync Failed, error code:%u, session_id:%lu, graph_id:%u.", | |||
| ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "RunGraphAsync Failed, error code:%u, session_id:%lu, " | |||
| "graph_id:%u", ret, sessionId_, graph_id); | |||
| REPORT_CALL_ERROR("E19999", "RunGraphAsync Failed, error code:%u, session_id:%lu, graph_id:%u", | |||
| ret, sessionId_, graph_id); | |||
| return FAILED; | |||
| } | |||
| return SUCCESS; | |||
| @@ -748,16 +685,14 @@ Status Session::RunGraphAsync(uint32_t graph_id, const std::vector<ge::Tensor> & | |||
| Status Session::GetVariables(const std::vector<std::string> &var_names, std::vector<Tensor> &var_values) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); | |||
| ErrorManager::GetInstance().GenWorkStreamIdDefault(); | |||
| auto instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Get][Variables]Failed, the GELib instance is nullptr or is not InitFlag."); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "GetVariables failed, the GELib instance is nullptr or is not InitFlag."); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Get Variables"); | |||
| Status ret = ge::GELib::GetInstance()->SessionManagerObj().GetVariables(sessionId_, var_names, var_values); | |||
| Status ret = g_session_manager->GetVariables(sessionId_, var_names, var_values); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, "[Get][Variables]Failed, error code:%u, session_id:%lu.", ret, sessionId_); | |||
| return FAILED; | |||
| @@ -769,14 +704,12 @@ Status Session::GetVariables(const std::vector<std::string> &var_names, std::vec | |||
| Status Session::GetVariables(const std::vector<AscendString> &var_names, std::vector<Tensor> &var_values) { | |||
| ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); | |||
| ErrorManager::GetInstance().GenWorkStreamIdDefault(); | |||
| auto instance_ptr = ge::GELib::GetInstance(); | |||
| if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, | |||
| "[Get][Variables]Failed, the GELib instance is nullptr or is not InitFlag."); | |||
| REPORT_INNER_ERROR("E19999", | |||
| "GetVariables failed, the GELib instance is nullptr or is not InitFlag."); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return FAILED; | |||
| } | |||
| GELOGT(TRACE_RUNNING, "Get Variables"); | |||
| std::vector<ge::string> str_var_names; | |||
| for (auto &var_name : var_names) { | |||
| @@ -787,17 +720,22 @@ Status Session::GetVariables(const std::vector<AscendString> &var_names, std::ve | |||
| } | |||
| str_var_names.emplace_back(var_name.GetString()); | |||
| } | |||
| Status ret = ge::GELib::GetInstance()->SessionManagerObj().GetVariables(sessionId_, str_var_names, var_values); | |||
| Status ret = g_session_manager->GetVariables(sessionId_, str_var_names, var_values); | |||
| if (ret != SUCCESS) { | |||
| GELOGE(ret, "[Get][Variables]Failed, error code:%u, session_id:%lu.", ret, sessionId_); | |||
| REPORT_CALL_ERROR("E19999", "Get variables failed, error code:%u, session_id:%lu.", | |||
| ret, sessionId_); | |||
| REPORT_CALL_ERROR("E19999", "Get variables failed, error code:%u, session_id:%lu.", ret, sessionId_); | |||
| return FAILED; | |||
| } | |||
| return SUCCESS; | |||
| } | |||
| bool Session::IsGraphNeedRebuild(uint32_t graph_id) { | |||
| return ge::GELib::GetInstance()->SessionManagerObj().IsGraphNeedRebuild(sessionId_, graph_id); | |||
| if (!g_ge_initialized) { | |||
| GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); | |||
| REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); | |||
| return false; | |||
| } | |||
| return g_session_manager->IsGraphNeedRebuild(sessionId_, graph_id); | |||
| } | |||
| } // namespace ge | |||
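The changes above route every public `Session` entry point through the file-scope `g_session_manager`, guarded by `g_ge_initialized`. For orientation, here is a minimal sketch of how a client typically drives this API end to end. It only uses calls that appear in this file (`GEInitialize`, the `Session` constructor, `AddGraph`, `RunGraph`, `GEFinalize`); the include path, the graph id, and how the `ge::Graph` is built are illustrative assumptions, not part of this patch.

```
// Minimal usage sketch (not part of this patch); error handling abbreviated,
// and building a valid ge::Graph is assumed to happen elsewhere.
#include <map>
#include <vector>
#include "ge/ge_api.h"  // public header; adjust the include path to your install layout

ge::Status RunGraphOnce(const ge::Graph &graph, const std::vector<ge::Tensor> &inputs) {
  std::map<ge::AscendString, ge::AscendString> options;
  // 1. Global init: with this patch this also creates the file-scope SessionManager.
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return ge::FAILED;
  }
  ge::Status ret = ge::SUCCESS;
  {
    ge::Session session(options);  // CreateSession() via g_session_manager
    const uint32_t graph_id = 1U;  // illustrative graph id
    std::vector<ge::Tensor> outputs;
    // 2. AddGraph/RunGraph delegate to g_session_manager and fail fast
    //    when GEInitialize was never called.
    if (session.AddGraph(graph_id, graph) != ge::SUCCESS ||
        session.RunGraph(graph_id, inputs, outputs) != ge::SUCCESS) {
      ret = ge::FAILED;
    }
  }  // ~Session() destroys the session through g_session_manager
  // 3. Global teardown; also finalizes the SessionManager.
  const ge::Status fin = ge::GEFinalize();
  return (ret == ge::SUCCESS) ? fin : ret;
}
```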
| @@ -1 +0,0 @@ | |||
| ../../proto/ge_api.proto | |||
| @@ -1,193 +0,0 @@ | |||
| syntax = "proto3"; | |||
| package ge.proto; | |||
| enum DataType | |||
| { | |||
| DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set. | |||
| DT_FLOAT = 1; // float type | |||
| DT_FLOAT16 = 2; // fp16 type | |||
| DT_INT8 = 3; // int8 type | |||
| DT_UINT8 = 4; // uint8 type | |||
| DT_INT16 = 5; // int16 type | |||
| DT_UINT16 = 6; // uint16 type | |||
| DT_INT32 = 7; // int32 type | |||
| DT_INT64 = 8; // int64 type | |||
| DT_UINT32 = 9; // unsigned int32 | |||
| DT_UINT64 = 10; // unsigned int64 | |||
| DT_BOOL = 11; // bool type | |||
| DT_DOUBLE = 12; // double type | |||
| DT_STRING = 13; // string type | |||
| DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */ | |||
| DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */ | |||
| DT_COMPLEX64 = 16; // complex64 type | |||
| DT_COMPLEX128 = 17; // complex128 type | |||
| DT_QINT8 = 18; // qint8 type | |||
| DT_QINT16 = 19; // qint16 type | |||
| DT_QINT32 = 20; // qint32 type | |||
| DT_QUINT8 = 21; // quint8 type | |||
| DT_QUINT16 = 22; // quint16 type | |||
| DT_RESOURCE = 23; // resource type | |||
| DT_STRING_REF = 24; // string_ref type | |||
| DT_DUAL = 25; /**< dual output type */ | |||
| DT_VARIANT = 26; // variant type | |||
| DT_BF16 = 27; // bf16 type | |||
| DT_INT4 = 28; // int4 type | |||
| } | |||
| message AttrDef | |||
| { | |||
| message ListValue | |||
| { | |||
| enum ListValueType{ | |||
| VT_LIST_NONE = 0; | |||
| VT_LIST_STRING = 1; | |||
| VT_LIST_INT = 2; | |||
| VT_LIST_FLOAT = 3; | |||
| VT_LIST_BOOL = 4; | |||
| VT_LIST_BYTES = 5; | |||
| VT_LIST_TENSOR_DESC = 6; | |||
| VT_LIST_TENSOR = 7; | |||
| VT_LIST_GRAPH = 8; | |||
| VT_LIST_NAMED_ATTRS = 9; | |||
| VT_LIST_DATA_TYPE = 10; | |||
| } | |||
| repeated bytes s = 2; // "list(string)" | |||
| repeated int64 i = 3; // "list(int)" | |||
| repeated float f = 4; // "list(float)" | |||
| repeated bool b = 5; // "list(bool)" | |||
| repeated bytes bt = 7; | |||
| repeated TensorDescriptor td = 8; | |||
| repeated TensorDef t = 9; | |||
| repeated GraphDef g = 10; | |||
| repeated NamedAttrs na = 11; | |||
| repeated int64 dt = 12; // list ge::DataType | |||
| ListValueType val_type = 20; | |||
| } | |||
| message ListListInt{ | |||
| message ListInt{ | |||
| repeated int64 list_i = 1; // list int | |||
| } | |||
| repeated ListInt list_list_i = 1; // list list int | |||
| } | |||
| oneof value | |||
| { | |||
| bytes s = 2; // "string" | |||
| int64 i = 3; // "int" | |||
| float f = 4; // "float" | |||
| bool b = 5; // "bool" | |||
| bytes bt = 7; | |||
| ListValue list = 1; // any "list(...)" | |||
| NamedAttrs func = 10; // Used to support attr nesting | |||
| TensorDescriptor td = 11; // GeTensorDesc type | |||
| TensorDef t = 12; // GeTensor type | |||
| GraphDef g = 13; // Graph type | |||
| ListListInt list_list_int = 14; // List List Int type | |||
| int64 dt = 15; // ge::DataType | |||
| } | |||
| } | |||
| // A list of attr names and their values. The whole list is attached | |||
| // with a string name. E.g., MatMul[T=float]. | |||
| message NamedAttrs | |||
| { | |||
| string name = 1; | |||
| map<string, AttrDef> attr = 2; | |||
| } | |||
| // Shape / dimension description, using row-major order | |||
| message ShapeDef | |||
| { | |||
| repeated int64 dim = 1; // Size of each dimension | |||
| } | |||
| // Multidimensional data description | |||
| message TensorDescriptor | |||
| { | |||
| string name = 1; // Optional parameter, tensor name | |||
| DataType dtype = 2; // tensor datatype | |||
| ShapeDef shape = 3; // Shape / dimension | |||
| string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND" | |||
| bool has_out_attr = 9; | |||
| int64 size = 10; | |||
| int64 weight_size = 11; | |||
| bool reuse_input = 12; | |||
| bool output_tensor = 13; | |||
| string device_type = 14; | |||
| bool input_tensor =15; | |||
| int64 real_dim_cnt = 16; | |||
| int64 reuse_input_index = 17; | |||
| int64 data_offset = 18; | |||
| int64 cmps_size = 19; | |||
| string cmps_tab = 20; | |||
| int64 cmps_tab_offset = 21; | |||
| map<string, AttrDef> attr = 5; // Set of extra parameter fields | |||
| } | |||
| // GeTensor definition | |||
| message TensorDef | |||
| { | |||
| TensorDescriptor desc = 1; // Tensor description | |||
| bytes data = 2; // Tensor data | |||
| } | |||
| // Operator description | |||
| message OpDef | |||
| { | |||
| string name = 1; // name | |||
| string type = 2; // type | |||
| repeated string input = 5; // input original op name + outgoing index. op_name:index | |||
| map<string, AttrDef> attr = 10; // Set of operator parameter fields | |||
| bool has_out_attr = 20; | |||
| int64 id = 21; | |||
| int64 stream_id =22; | |||
| repeated string input_name = 23; | |||
| repeated string src_name = 24; | |||
| repeated int64 src_index = 25; | |||
| repeated string dst_name = 26; | |||
| repeated int64 dst_index = 27; | |||
| repeated int64 input_i = 28; | |||
| repeated int64 output_i = 29; | |||
| repeated int64 workspace = 30; | |||
| repeated int64 workspace_bytes = 31; | |||
| repeated bool is_input_const = 32; | |||
| repeated TensorDescriptor input_desc = 33; | |||
| repeated TensorDescriptor output_desc = 34; | |||
| repeated string subgraph_name = 35; | |||
| } | |||
| // Graph definition | |||
| message GraphDef | |||
| { | |||
| string name = 1; // name | |||
| repeated string input = 4; // Graph input | |||
| repeated string output = 5; // Graph output | |||
| repeated OpDef op = 6; // List of operators | |||
| map<string, AttrDef> attr = 11; // Extended field | |||
| } | |||
| // model definition | |||
| message ModelDef | |||
| { | |||
| string name = 1; // name | |||
| uint32 version = 2; // IR Proto version | |||
| string custom_version = 3; // User model version number, passed in by user | |||
| repeated GraphDef graph = 7; // Graph definition; graph[0] represents the main graph in ModelDef | |||
| map<string, AttrDef> attr = 11; // Extended field | |||
| } | |||
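For reference, the removed IR definition above maps onto ordinary protobuf-generated C++ classes. Below is a minimal sketch of building a tiny `ModelDef`/`GraphDef` through that generated API; the header name, op names, and attribute names are illustrative assumptions only.

```
// Sketch of driving the generated classes for the IR definition above.
// Assumes protoc has produced ge_ir.pb.h (the actual header name depends on
// the .proto file name used in the build).
#include "proto/ge_ir.pb.h"

ge::proto::ModelDef MakeTinyModel() {
  ge::proto::ModelDef model;
  model.set_name("demo_model");
  model.set_version(1);

  ge::proto::GraphDef *graph = model.add_graph();  // graph[0] is the main graph
  graph->set_name("main_graph");

  ge::proto::OpDef *op = graph->add_op();
  op->set_name("add_0");                           // illustrative op name
  op->set_type("Add");
  op->add_input("data_0:0");                       // "op_name:output_index"
  op->add_input("data_1:0");

  // Attributes are a map<string, AttrDef>; the oneof selects the value kind.
  ge::proto::AttrDef attr;
  attr.set_i(0);                                   // "int" attribute
  (*op->mutable_attr())["example_attr"] = attr;    // illustrative attribute name
  return model;
}
```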
| @@ -1,140 +0,0 @@ | |||
| syntax = "proto3"; | |||
| package domi; | |||
| message InsertNewOps { | |||
| repeated AippOpParams aipp_op = 1; | |||
| repeated MultiShapeOpParams multi_shape_op = 2; | |||
| } | |||
| message AippOpParams { | |||
| enum InputFormat { | |||
| UNDEFINED = 0; | |||
| YUV420SP_U8 = 1; | |||
| XRGB8888_U8 = 2; | |||
| RGB888_U8 = 3; | |||
| YUV400_U8 = 4; | |||
| NC1HWC0DI_FP16 = 5; | |||
| NC1HWC0DI_S8 = 6; | |||
| ARGB8888_U8 = 7; | |||
| YUYV_U8 = 8; | |||
| YUV422SP_U8 = 9; | |||
| AYUV444_U8 = 10; | |||
| RAW10 = 11; | |||
| RAW12 = 12; | |||
| RAW16 = 13; | |||
| RAW24 = 14; | |||
| RGB16 = 15; | |||
| RGB20 = 16; | |||
| RGB24 = 17; | |||
| RGB8_IR = 18; | |||
| RGB16_IR = 19; | |||
| RGB24_IR = 20; | |||
| } | |||
| enum AippMode { | |||
| undefined = 0; | |||
| static = 1; | |||
| dynamic = 2; | |||
| } | |||
| // AIPP mode: distinguishes static AIPP from dynamic AIPP | |||
| AippMode aipp_mode = 1; | |||
| // related_input_rank is required, integer type, valid range >= 0 and <= the number of input Data operators, default 0. | |||
| // It identifies which model input AIPP is applied to; e.g. if the model has two inputs and AIPP should apply to the second one, set related_input_rank to 1. | |||
| uint32 related_input_rank = 2; | |||
| // related_input_name is optional and the top name of data node which inserts aipp | |||
| string related_input_name = 6; | |||
| // input_edge_idx参数为可选,类型为整型,配置范围为>=0。 | |||
| // 配置该参数的作用,在于对Data算子不同的输出做不同的AIPP处理,如果该参数没有配置,默认对related_input_rank指定的模型输入的所有输出边做AIPP。 | |||
| // 配置值 <= Data算子输出边的个数。 | |||
| repeated uint32 input_edge_idx = 3; | |||
| // [Begin] 动态AIPP参数,配置静态AIPP时无效 | |||
| uint32 max_src_image_size = 4; | |||
| // 是否支持旋转。默认不支持,开启支持旋转时,会有额外的空间和性能损失 | |||
| bool support_rotation = 5; | |||
| // [End] Dynamic AIPP parameters | |||
| // [Begin] Static AIPP parameters, ignored when dynamic AIPP is configured | |||
| InputFormat input_format = 51; | |||
| bool csc_switch = 52; | |||
| float cpadding_value = 53; | |||
| bool rbuv_swap_switch = 54; | |||
| bool ax_swap_switch = 55; | |||
| bool single_line_mode = 56; | |||
| int32 src_image_size_w = 57; | |||
| int32 src_image_size_h = 58; | |||
| bool crop = 59; | |||
| int32 load_start_pos_w = 60; | |||
| int32 load_start_pos_h = 61; | |||
| int32 crop_size_w = 62; | |||
| int32 crop_size_h = 63; | |||
| bool resize = 64; | |||
| int32 resize_output_w = 65; | |||
| int32 resize_output_h = 66; | |||
| bool padding = 67; | |||
| int32 left_padding_size = 68; | |||
| int32 right_padding_size = 69; | |||
| int32 top_padding_size = 70; | |||
| int32 bottom_padding_size = 71; | |||
| float padding_value = 72; | |||
| int32 mean_chn_0 = 10; | |||
| int32 mean_chn_1 = 11; | |||
| int32 mean_chn_2 = 12; | |||
| int32 mean_chn_3 = 19; | |||
| float min_chn_0 = 13; | |||
| float min_chn_1 = 14; | |||
| float min_chn_2 = 15; | |||
| float min_chn_3 = 20; | |||
| repeated float var_reci_chn_0 = 16; | |||
| repeated float var_reci_chn_1 = 17; | |||
| repeated float var_reci_chn_2 = 18; | |||
| repeated float var_reci_chn_3 = 21; | |||
| repeated int32 matrix_r0c0 = 30; | |||
| repeated int32 matrix_r0c1 = 31; | |||
| repeated int32 matrix_r0c2 = 32; | |||
| repeated int32 matrix_r1c0 = 33; | |||
| repeated int32 matrix_r1c1 = 34; | |||
| repeated int32 matrix_r1c2 = 35; | |||
| repeated int32 matrix_r2c0 = 36; | |||
| repeated int32 matrix_r2c1 = 37; | |||
| repeated int32 matrix_r2c2 = 38; | |||
| repeated int32 output_bias_0 = 39; | |||
| repeated int32 output_bias_1 = 40; | |||
| repeated int32 output_bias_2 = 41; | |||
| repeated int32 input_bias_0 = 42; | |||
| repeated int32 input_bias_1 = 43; | |||
| repeated int32 input_bias_2 = 44; | |||
| // [End] Static AIPP parameters | |||
| // The n number that is used for raw/rgbir data into f16 transformation. | |||
| // The transformation equation is x/(2^n). If set to 0, no transform is performed. | |||
| uint32 raw_rgbir_to_f16_n = 45; | |||
| } | |||
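| // Illustrative example (added for clarity, not part of the original file): a plausible static AIPP | |||
| // configuration for the second model input, written in prototxt form. The field names come from the | |||
| // message above; the concrete values are assumptions. | |||
| // | |||
| // aipp_op { | |||
| //   aipp_mode: static | |||
| //   related_input_rank: 1 | |||
| //   input_format: YUV420SP_U8 | |||
| //   src_image_size_w: 224 | |||
| //   src_image_size_h: 224 | |||
| //   csc_switch: true | |||
| //   mean_chn_0: 104 | |||
| //   mean_chn_1: 117 | |||
| //   mean_chn_2: 123 | |||
| // } | |||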
| message MultiShapeOpParams { | |||
| enum MultiShapeMode { | |||
| batch = 0; // dynamic batch | |||
| resolution = 1; // dynamic resolution, reserved for extension | |||
| } | |||
| MultiShapeMode mode = 1; // operator mode | |||
| uint32 related_input_rank = 2; // which input the newly added operator is inserted at | |||
| repeated uint32 batch_list = 11; // batch_list values; the number of entries must be between 2 and 8 | |||
| } | |||
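| // Illustrative example (an assumption, not from the original file): a dynamic-batch configuration in | |||
| // prototxt form, using the batch mode above with a batch_list of 2 to 8 entries. | |||
| // | |||
| // multi_shape_op { | |||
| //   mode: batch | |||
| //   related_input_rank: 0 | |||
| //   batch_list: 1 | |||
| //   batch_list: 2 | |||
| //   batch_list: 4 | |||
| //   batch_list: 8 | |||
| // } | |||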
| @@ -1,396 +0,0 @@ | |||
| /* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. | |||
| * | |||
| * This program is free software; you can redistribute it and/or modify | |||
| * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. | |||
| * | |||
| * This program is distributed in the hope that it will be useful, | |||
| * but WITHOUT ANY WARRANTY; without even the implied warranty of | |||
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |||
| * Apache License for more details at | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi; | |||
| enum TargetType | |||
| { | |||
| MINI = 0; | |||
| TINY = 1; | |||
| LITE = 2; | |||
| } | |||
| // offline model | |||
| message ModelDef { | |||
| string name = 1; | |||
| uint32 version = 2; | |||
| uint64 memory_size = 10; | |||
| uint32 stream_num = 11; | |||
| uint32 event_num = 12; | |||
| uint64 weight_size = 13; | |||
| uint32 label_num = 15; | |||
| repeated OpDef op = 20; | |||
| TargetType target_type = 23; | |||
| map<string, AttrDef> attr = 30; | |||
| }; | |||
| // operator definition | |||
| message OpDef { | |||
| string name = 1; | |||
| string type = 2; | |||
| uint32 id = 3; | |||
| uint32 stream_id = 4; | |||
| repeated string input_name = 5; | |||
| repeated string src_name = 8; | |||
| repeated int32 src_index = 9; | |||
| repeated int64 input = 10; | |||
| repeated int64 output = 11; | |||
| repeated TensorDescriptor input_desc = 12; | |||
| repeated TensorDescriptor output_desc = 13; | |||
| repeated WeightDef weights = 14; | |||
| repeated string dst_name = 15; | |||
| repeated int32 dst_index = 16; | |||
| repeated int64 workspace = 20; | |||
| repeated uint32 workspace_bytes = 21; | |||
| repeated string weight_name = 22; | |||
| repeated bool is_input_const = 23; | |||
| map<string, AttrDef> attr = 30; | |||
| QuantizeFactorParams quantize_factor = 31; | |||
| oneof op_params { | |||
| // start at 100 here | |||
| SendOpParams sender_param = 100; | |||
| RecvOpParams receiver_param = 200; | |||
| ConvolutionOpParams convolution_param = 300; | |||
| PoolingOpParams pooling_param = 400; | |||
| EltwiseOpParams eltwise_param = 500; | |||
| BatchNormOpParams batchnorm_param = 600; | |||
| ScaleOpParams scale_param = 700; | |||
| FullConnectionOpParams full_connection_param = 800; | |||
| SoftmaxOpParams softmax_param = 900; | |||
| ActivationOpParams activation_param = 1000; | |||
| ReshapeOpParams reshape_param = 1100; | |||
| } | |||
| }; | |||
| message SendOpParams { | |||
| uint32 event_id = 1; | |||
| }; | |||
| message RecvOpParams { | |||
| uint32 event_id = 1; | |||
| }; | |||
| enum QuantizeScaleType | |||
| { | |||
| VECTOR_SCALE = 0; | |||
| SCALAR_SCALE = 1; | |||
| } | |||
| enum QuantizeScaleMode | |||
| { | |||
| NORMAL_MODE = 0; | |||
| SQRT_MODE = 1; | |||
| } | |||
| enum QuantizeAlgorithm | |||
| { | |||
| NON_OFFSET_ALGO = 0; | |||
| HALF_OFFSET_ALGO = 1; | |||
| ALL_OFFSET_ALGO = 2; | |||
| } | |||
| message QuantizeFactor | |||
| { | |||
| QuantizeScaleMode scale_mode = 1; | |||
| bytes scale_value = 2; | |||
| int64 scale_offset = 3; | |||
| bytes offset_data_value = 4; | |||
| int64 offset_data_offset = 5; | |||
| bytes offset_weight_value = 6; | |||
| int64 offset_weight_offset = 7; | |||
| bytes offset_pad_value = 8; | |||
| int64 offset_pad_offset = 9; | |||
| }; | |||
| message QuantizeCalcFactor | |||
| { | |||
| bytes offsetw = 1; | |||
| int64 offsetw_offset = 2; | |||
| bytes offsetd = 3; | |||
| int64 offsetd_offset = 4; | |||
| bytes scalereq = 5; | |||
| int64 scaledreq_offset = 6; | |||
| bytes offsetdnext = 7; | |||
| int64 offsetdnext_offset = 8; | |||
| } | |||
| message QuantizeFactorParams | |||
| { | |||
| QuantizeAlgorithm quantize_algo = 1; | |||
| QuantizeScaleType scale_type = 2; | |||
| QuantizeFactor quantize_param = 3; | |||
| QuantizeFactor dequantize_param = 4; | |||
| QuantizeFactor requantize_param = 5; | |||
| QuantizeCalcFactor quantizecalc_param = 6; | |||
| }; | |||
| message ConvolutionOpParams { | |||
| int32 mode = 1; | |||
| int32 algo = 2; | |||
| int32 pad_mode = 3; | |||
| uint32 group = 4; | |||
| uint32 num_output = 5; | |||
| repeated uint32 pad = 10; | |||
| repeated uint32 stride = 11; | |||
| repeated uint32 dilation = 12; | |||
| repeated uint32 kernel = 13; | |||
| float alpha = 20; | |||
| float beta = 21; | |||
| WeightDef filter = 40; | |||
| WeightDef bias = 41; | |||
| bool relu_flag = 62; | |||
| repeated uint32 adj = 70; | |||
| repeated uint32 target_shape = 71; | |||
| repeated uint32 before_pad = 72; | |||
| }; | |||
| message PoolingOpParams { | |||
| int32 mode = 1; | |||
| int32 nan_opt = 2; | |||
| int32 pad_mode = 3; | |||
| bool global_pooling = 4; | |||
| repeated uint32 window = 10; | |||
| repeated uint32 pad = 11; | |||
| repeated uint32 stride = 12; | |||
| bool ceil_mode = 13; | |||
| int32 data_mode = 14; | |||
| float alpha = 20; | |||
| float beta = 21; | |||
| repeated uint32 before_pad = 22; | |||
| }; | |||
| message EltwiseOpParams { | |||
| int32 mode = 1; | |||
| repeated float coeff = 2; | |||
| float alpha = 3; | |||
| float beta = 4; | |||
| repeated WeightDef weight = 5; | |||
| bool relu_flag = 6; | |||
| }; | |||
| message ActivationOpParams { | |||
| int32 mode = 1; | |||
| float coef = 2; | |||
| float alpha = 3; | |||
| float beta = 4; | |||
| }; | |||
| message BatchNormOpParams { | |||
| int32 mode = 1; | |||
| float alpha = 2; | |||
| float beta = 3; | |||
| double epsilon = 4; // optional, [default = 1e-5] | |||
| bool use_global_stats = 5; // optional, default is true; testing mode | |||
| float moving_average_fraction = 6; // optional, [default = .999] | |||
| WeightDef estimated_mean = 7; | |||
| WeightDef estimated_variance = 8; | |||
| WeightDef scale = 9; | |||
| WeightDef bias = 10; | |||
| }; | |||
| message ScaleOpParams { | |||
| WeightDef scale = 1; | |||
| WeightDef bias = 2; | |||
| }; | |||
| message ReshapeOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| ShapeDef shape = 3; | |||
| int32 axis = 4; | |||
| int32 num_axes = 5; | |||
| int32 format = 6; | |||
| }; | |||
| message SoftmaxOpParams { | |||
| int32 algo = 1; | |||
| int32 mode = 2; | |||
| float alpha = 3; | |||
| float beta = 4; | |||
| }; | |||
| message FullConnectionOpParams { | |||
| WeightDef filter = 1; | |||
| WeightDef bias = 2; | |||
| uint32 num_output = 3; | |||
| bool relu_flag = 12; | |||
| }; | |||
| message FlattenOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| int32 start_axis = 3; | |||
| int32 end_axis = 4; | |||
| } | |||
| message AddLimitedOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| int32 axis = 3; | |||
| bool broadcast = 4; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message MulLimitedOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| int32 axis = 3; | |||
| bool broadcast = 4; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message AddOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message MulOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message SubOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message BiasAddOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| WeightDef bias = 10; | |||
| }; | |||
| message MatMulOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| bool transposeX = 3; | |||
| bool transposeW = 4; | |||
| WeightDef filter = 10; | |||
| WeightDef bias = 12; | |||
| }; | |||
| message RsqrtOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| }; | |||
| message WeightDef { | |||
| int32 format = 1; | |||
| int32 data_type = 2; | |||
| ShapeDef shape = 3; | |||
| bytes data = 4; | |||
| int64 data_offset = 5; | |||
| uint32 cmps_size = 6; | |||
| bytes cmps_tab = 7; | |||
| int64 cmps_tab_offset = 10; | |||
| CompressInfo cmps_info = 8; | |||
| AllOffsetQuantizeInfo alloffset_quantize_info = 11; | |||
| } | |||
| message ShapeDef { | |||
| repeated int64 dim = 1; | |||
| } | |||
| enum DeviceType { | |||
| NPU = 0; // By default, NPU is used. | |||
| CPU = 1; // CPU | |||
| } | |||
| message AllOffsetQuantizeInfo { | |||
| float scale = 1; | |||
| int32 offset = 2; | |||
| } | |||
| message TensorDescriptor { | |||
| int32 format = 1; | |||
| int32 data_type = 2; | |||
| repeated int64 dim = 3; | |||
| uint32 size = 4; | |||
| bool reuse_input = 5; | |||
| bool output_tensor = 7; | |||
| DeviceType device_type = 8; | |||
| bool input_tensor = 9; | |||
| uint32 real_dim_cnt = 10; | |||
| uint32 reuse_input_index = 11; | |||
| AllOffsetQuantizeInfo alloffset_quantize_info = 12; | |||
| } | |||
| message CompressInfo { | |||
| int32 blockRow = 1; // block row | |||
| int32 blockCol = 2; // block col | |||
| int32 fractalK = 3; // fractal K | |||
| int32 fractalN = 4; // fractal N | |||
| int32 lastFractalK = 5; // K of last fractal | |||
| int32 lastFractalN = 6; // N of last fractal | |||
| int32 cubeSize = 7; // cube's length | |||
| int32 loadDir = 8; // data load direction, 0: column load, 1: row load | |||
| } | |||
| message AttrDef { | |||
| message ListValue { | |||
| repeated string s = 2; // "list(string)" | |||
| repeated int64 i = 3 [packed = true]; // "list(int)" | |||
| repeated float f = 4 [packed = true]; // "list(float)" | |||
| repeated bool b = 5 [packed = true]; // "list(bool)" | |||
| repeated uint32 u = 6 [packed = true]; // "list(uint)" | |||
| repeated bytes bt = 7; | |||
| } | |||
| oneof value { | |||
| string s = 2; // "string" | |||
| int64 i = 3; // "int" | |||
| float f = 4; // "float" | |||
| bool b = 5; // "bool" | |||
| uint32 u = 6; // "uint32" | |||
| bytes bt = 7; | |||
| ListValue list = 1; // any "list(...)" | |||
| NamedAttrs func = 10; | |||
| } | |||
| } | |||
| // A list of attr names and their values. The whole list is attached | |||
| // with a string name. E.g., MatMul[T=float]. | |||
| message NamedAttrs { | |||
| string name = 1; | |||
| map<string, AttrDef> attr = 2; | |||
| } | |||
| @@ -1,179 +0,0 @@ | |||
| /* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. | |||
| * | |||
| * This program is free software; you can redistribute it and/or modify | |||
| * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. | |||
| * | |||
| * This program is distributed in the hope that it will be useful, | |||
| * but WITHOUT ANY WARRANTY; without even the implied warranty of | |||
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |||
| * Apache License for more details at | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi; | |||
| message ModelTaskDef { | |||
| string version = 1; | |||
| map<string, string> attr = 9; // Extended field | |||
| repeated TaskDef task = 10; | |||
| uint64 memory_size = 11; | |||
| uint32 stream_num = 12; | |||
| uint32 event_num = 13; | |||
| uint64 weight_size = 14; | |||
| repeated bytes op = 15; // input/output opdef in bytes | |||
| uint64 base_addr = 16; // base addr | |||
| uint64 weight_addr = 17; // weight addr | |||
| uint32 batch_num = 18; | |||
| } | |||
| message TaskDef { | |||
| uint32 id = 1; | |||
| uint32 type = 2; | |||
| uint32 stream_id = 10; | |||
| uint32 event_id = 11; | |||
| KernelDef kernel = 20; | |||
| KernelExDef kernel_ex = 21; | |||
| KernelHcclDef kernel_hccl = 25; | |||
| EventExDef event_ex = 26; | |||
| LogTimeStampDef log_timestamp = 28; | |||
| uint32 label_id = 30; | |||
| MemcpyAsyncDef memcpy_async = 31; | |||
| StreamSwitchDef stream_switch = 32; | |||
| StreamActiveDef stream_active = 33; | |||
| bytes private_def = 34; | |||
| uint64 ops_kernel_store_ptr = 35; // adjustments to other fields in the future | |||
| StreamSwitchNDef stream_switch_n = 36; | |||
| LabelSetDef label_set = 37; | |||
| LabelGotoExDef label_goto_ex = 38; | |||
| LabelSwitchByIndexDef label_switch_by_index = 39; | |||
| KernelDefWithHandle kernel_with_handle = 40; | |||
| } | |||
| message KernelDef { | |||
| KernelContext context = 1; | |||
| string stub_func = 10; | |||
| uint32 block_dim = 11; | |||
| uint32 args_size = 12; | |||
| bytes args = 13; | |||
| bytes sm_desc = 14; | |||
| bytes flowtable = 15; | |||
| string so_name = 16; | |||
| string kernel_name = 17; | |||
| bytes kernel_ext_info = 18; | |||
| uint32 kernel_ext_info_size = 19; | |||
| } | |||
| message KernelDefWithHandle { | |||
| KernelContext context = 1; | |||
| uint64 handle = 10; | |||
| string dev_func = 11; | |||
| uint32 block_dim = 12; | |||
| uint32 args_size = 13; | |||
| bytes args = 14; | |||
| bytes sm_desc = 15; | |||
| string original_kernel_key = 16; | |||
| string node_info = 17; | |||
| } | |||
| message KernelContext { | |||
| uint32 kernel_type = 1; | |||
| uint32 op_id = 2; // OP type in CCE | |||
| uint32 kernel_func_id = 3; | |||
| uint32 op_index = 4; // TE/Custom operator | |||
| bool is_flowtable = 5; // Identify whether args is a flowtable structure | |||
| bytes args_offset = 6; // args offset information | |||
| uint32 args_count = 7; // args count | |||
| repeated uint32 origin_op_index = 8; | |||
| } | |||
| message KernelExDef { | |||
| uint32 flags = 1; | |||
| uint32 op_index = 4; | |||
| uint32 args_size = 12; | |||
| bytes args = 13; | |||
| bytes task_info = 14; // serialized nodeDef, funcDef, inputoutput | |||
| uint32 task_info_size = 15; | |||
| bytes kernel_ext_info = 16; | |||
| uint32 kernel_ext_info_size = 17; | |||
| } | |||
| message KernelHcclDef { | |||
| uint32 op_index = 8; | |||
| string hccl_type = 9; | |||
| } | |||
| message EventExDef { | |||
| uint32 op_index = 1; | |||
| uint32 event_type = 2; | |||
| } | |||
| message LogTimeStampDef { | |||
| uint64 logid = 1; | |||
| bool notify = 2; | |||
| uint32 flat = 3; | |||
| } | |||
| message MemcpyAsyncDef { | |||
| uint64 dst = 1; | |||
| uint64 dst_max = 2; | |||
| uint64 src = 3; | |||
| uint64 count = 4; | |||
| uint32 kind = 5; | |||
| uint32 op_index = 6; | |||
| } | |||
| message StreamSwitchDef { | |||
| uint32 op_index = 1; | |||
| uint32 true_stream_id = 2; | |||
| int64 value = 3; | |||
| uint64 value_ptr = 4; | |||
| uint32 data_type = 5; | |||
| } | |||
| message StreamActiveDef { | |||
| uint32 op_index = 1; | |||
| uint32 active_stream_id = 2; | |||
| } | |||
| message StreamSwitchNDef { | |||
| uint32 op_index = 1; | |||
| uint32 size = 2; | |||
| repeated int64 target_value = 3; | |||
| repeated uint32 true_stream_id = 4; | |||
| uint32 element_size = 5; | |||
| uint32 data_type = 6; | |||
| } | |||
| message LabelSetDef { | |||
| uint32 op_index = 1; | |||
| uint32 label_id = 2; | |||
| uint32 model_id = 3; | |||
| } | |||
| message LabelGotoExDef { | |||
| uint32 op_index = 1; | |||
| uint32 label_id = 2; | |||
| uint32 model_id = 3; | |||
| } | |||
| message LabelSwitchByIndexDef { | |||
| uint32 op_index = 1; | |||
| uint32 label_max = 2; | |||
| } | |||
| @@ -1,48 +1,55 @@ | |||
| set(SRC_LIST | |||
| "context/ctx.cc" | |||
| "model_saver.cc" | |||
| "ge/datatype_util.cc" | |||
| "helper/om_file_helper.cc" | |||
| "helper/model_helper.cc" | |||
| "../model/ge_model.cc" | |||
| "../model/ge_root_model.cc" | |||
| "auth/file_saver.cc" | |||
| "fp16_t.cc" | |||
| "math/fp16_math.cc" | |||
| "debug/memory_dumper.cc" | |||
| "formats/utils/formats_trans_utils.cc" | |||
| "dump/dump_properties.cc" | |||
| "formats/format_transfers/datatype_transfer.cc" | |||
| "formats/format_transfers/format_transfer_transpose.cc" | |||
| "formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" | |||
| "formats/format_transfers/format_transfer_fractal_z.cc" | |||
| "formats/format_transfers/format_transfer_fractal_nz.cc" | |||
| "formats/format_transfers/format_transfer_fractal_zz.cc" | |||
| "formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" | |||
| "formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" | |||
| "formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" | |||
| "formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" | |||
| "formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" | |||
| "formats/format_transfers/format_transfer_fracz_nchw.cc" | |||
| "formats/format_transfers/format_transfer_fracz_nhwc.cc" | |||
| "formats/format_transfers/format_transfer_fracz_hwcn.cc" | |||
| "formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" | |||
| "formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" | |||
| "formats/format_transfers/format_transfer_nchw_fz_c04.cc" | |||
| "formats/formats.cc" | |||
| "ge_format_util.cc" | |||
| "fmk_error_codes.cc" | |||
| "util.cc" | |||
| "properties_manager.cc" | |||
| "types.cc" | |||
| "model_parser/model_parser.cc" | |||
| "kernel_store.cc" | |||
| "tbe_kernel_store.cc" | |||
| "cust_aicpu_kernel_store.cc" | |||
| "op/attr_value_util.cc" | |||
| "op/ge_op_utils.cc" | |||
| "thread_pool.cc" | |||
| "ge/tbe_plugin_manager.cc" | |||
| "${GE_CODE_DIR}/ge/common/auth/file_saver.cc" | |||
| "${GE_CODE_DIR}/ge/common/bcast.cc" | |||
| "${GE_CODE_DIR}/ge/common/context/ctx.cc" | |||
| "${GE_CODE_DIR}/ge/common/cust_aicpu_kernel_store.cc" | |||
| "${GE_CODE_DIR}/ge/common/debug/memory_dumper.cc" | |||
| "${GE_CODE_DIR}/ge/common/dump/dump_manager.cc" | |||
| "${GE_CODE_DIR}/ge/common/dump/dump_properties.cc" | |||
| "${GE_CODE_DIR}/ge/common/fmk_error_codes.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/datatype_transfer.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fractal_z.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_transpose.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/formats.cc" | |||
| "${GE_CODE_DIR}/ge/common/formats/utils/formats_trans_utils.cc" | |||
| "${GE_CODE_DIR}/ge/common/fp16_t.cc" | |||
| "${GE_CODE_DIR}/ge/common/ge/datatype_util.cc" | |||
| "${GE_CODE_DIR}/ge/common/ge/op_tiling_manager.cc" | |||
| "${GE_CODE_DIR}/ge/common/ge/plugin_manager.cc" | |||
| "${GE_CODE_DIR}/ge/common/ge/tbe_plugin_manager.cc" | |||
| "${GE_CODE_DIR}/ge/common/ge_format_util.cc" | |||
| "${GE_CODE_DIR}/ge/common/helper/model_helper.cc" | |||
| "${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc" | |||
| "${GE_CODE_DIR}/ge/common/kernel_store.cc" | |||
| "${GE_CODE_DIR}/ge/common/local_context.cc" | |||
| "${GE_CODE_DIR}/ge/common/math/fp16_math.cc" | |||
| "${GE_CODE_DIR}/ge/common/model/ge_model.cc" | |||
| "${GE_CODE_DIR}/ge/common/model/ge_root_model.cc" | |||
| "${GE_CODE_DIR}/ge/common/model_parser/model_parser.cc" | |||
| "${GE_CODE_DIR}/ge/common/model_saver.cc" | |||
| "${GE_CODE_DIR}/ge/common/omg_util.cc" | |||
| "${GE_CODE_DIR}/ge/common/op/attr_value_util.cc" | |||
| "${GE_CODE_DIR}/ge/common/op/ge_op_utils.cc" | |||
| "${GE_CODE_DIR}/ge/common/properties_manager.cc" | |||
| "${GE_CODE_DIR}/ge/common/tbe_kernel_store.cc" | |||
| "${GE_CODE_DIR}/ge/common/thread_pool.cc" | |||
| "${GE_CODE_DIR}/ge/common/transop_util.cc" | |||
| "${GE_CODE_DIR}/ge/common/types.cc" | |||
| "${GE_CODE_DIR}/ge/common/util.cc" | |||
| ) | |||
| if (NOT ENABLE_D AND NOT ENABLE_ACL) | |||
| @@ -63,7 +70,7 @@ target_compile_definitions(ge_common PRIVATE | |||
| ) | |||
| target_compile_options(ge_common PRIVATE | |||
| -fvisibility=hidden | |||
| -fvisibility=default | |||
| -O2 | |||
| -Werror | |||
| -Wno-deprecated-declarations | |||
| @@ -72,24 +79,18 @@ target_compile_options(ge_common PRIVATE | |||
| target_include_directories(ge_common PRIVATE | |||
| ${GE_CODE_DIR}/ge | |||
| ${GE_CODE_DIR}/ge/common | |||
| ${GE_CODE_DIR}/ge/common/op | |||
| ${GE_CODE_DIR}/inc/external | |||
| ${GE_CODE_DIR}/inc | |||
| ${GE_CODE_DIR}/inc/framework | |||
| ${METADEF_DIR}/inc | |||
| ${METADEF_DIR}/inc/external | |||
| ${METADEF_DIR}/inc/external/graph | |||
| ${METADEF_DIR}/inc/graph | |||
| ${CMAKE_BINARY_DIR} | |||
| ${CMAKE_BINARY_DIR}/proto/graphengine_protos | |||
| #### yellow zone #### | |||
| ${GE_DEPEND_DIR}/inc | |||
| ${GE_DEPEND_DIR}/inc/cce | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:${GE_DEPEND_DIR}/inc> | |||
| #### blue zone #### | |||
| #${GE_DEPEND_DIR}/include | |||
| ${GE_CODE_DIR}/third_party/fwkacllib/inc | |||
| ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain | |||
| $<$<BOOL:${ENABLE_OPEN_SRC}>:${GE_CODE_DIR}/third_party/fwkacllib/inc> | |||
| $<$<BOOL:${ENABLE_OPEN_SRC}>:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> | |||
| ) | |||
| target_link_options(ge_common PRIVATE | |||
| @@ -98,6 +99,10 @@ target_link_options(ge_common PRIVATE | |||
| target_link_libraries(ge_common PRIVATE | |||
| $<BUILD_INTERFACE:intf_pub> | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:$<BUILD_INTERFACE:slog_headers>> | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:$<BUILD_INTERFACE:msprof_headers>> | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:$<BUILD_INTERFACE:mmpa_headers>> | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:$<BUILD_INTERFACE:runtime_headers>> | |||
| static_mmpa | |||
| -Wl,--no-as-needed | |||
| graph | |||
| @@ -139,28 +144,26 @@ target_compile_options(ge_common_static PRIVATE | |||
| target_include_directories(ge_common_static PRIVATE | |||
| ${GE_CODE_DIR}/ge | |||
| ${GE_CODE_DIR}/ge/common | |||
| ${GE_CODE_DIR}/ge/common/op | |||
| ${GE_CODE_DIR}/inc | |||
| ${GE_CODE_DIR}/inc/external | |||
| ${GE_CODE_DIR}/inc/framework | |||
| ${METADEF_DIR}/inc | |||
| ${METADEF_DIR}/inc/external | |||
| ${METADEF_DIR}/inc/external/graph | |||
| ${METADEF_DIR}/inc/graph | |||
| ${CMAKE_BINARY_DIR} | |||
| ${CMAKE_BINARY_DIR}/proto/graphengine_protos | |||
| #### yellow zone #### | |||
| ${GE_DEPEND_DIR}/inc | |||
| ${GE_DEPEND_DIR}/inc/cce | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:${GE_DEPEND_DIR}/inc> | |||
| #### blue zone #### | |||
| #${GE_DEPEND_DIR}/include | |||
| ${GE_CODE_DIR}/third_party/fwkacllib/inc | |||
| ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain | |||
| $<$<BOOL:${ENABLE_OPEN_SRC}>:${GE_CODE_DIR}/third_party/fwkacllib/inc> | |||
| $<$<BOOL:${ENABLE_OPEN_SRC}>:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> | |||
| ) | |||
| target_link_libraries(ge_common_static PRIVATE | |||
| $<BUILD_INTERFACE:intf_pub> | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:$<BUILD_INTERFACE:slog_headers>> | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:$<BUILD_INTERFACE:msprof_headers>> | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:$<BUILD_INTERFACE:mmpa_headers>> | |||
| $<$<NOT:$<BOOL:${ENABLE_OPEN_SRC}>>:$<BUILD_INTERFACE:runtime_headers>> | |||
| ascend_protobuf_static | |||
| json | |||
| c_sec | |||
| @@ -187,7 +190,7 @@ target_compile_definitions(ge_common PRIVATE | |||
| ) | |||
| target_compile_options(ge_common PRIVATE | |||
| -fvisibility=hidden | |||
| -fvisibility=default | |||
| -O2 | |||
| -Werror | |||
| -Wno-deprecated-declarations | |||
| @@ -196,15 +199,11 @@ target_compile_options(ge_common PRIVATE | |||
| target_include_directories(ge_common PRIVATE | |||
| ${GE_CODE_DIR}/ge | |||
| ${GE_CODE_DIR}/ge/common | |||
| ${GE_CODE_DIR}/ge/common/op | |||
| ${GE_CODE_DIR}/inc/external | |||
| ${GE_CODE_DIR}/inc | |||
| ${GE_CODE_DIR}/inc/framework | |||
| ${METADEF_DIR}/inc | |||
| ${METADEF_DIR}/inc/external | |||
| ${METADEF_DIR}/inc/external/graph | |||
| ${METADEF_DIR}/inc/graph | |||
| ${CMAKE_BINARY_DIR} | |||
| ${CMAKE_BINARY_DIR}/proto/graphengine_protos | |||
| ${GE_CODE_DIR}/third_party/fwkacllib/inc | |||
| @@ -238,7 +238,7 @@ Status FileSaver::SaveToBuffWithFileHeader(const ModelFileHeader &file_header, | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status FileSaver::CheckPath(const std::string &file_path) { | |||
| Status FileSaver::CheckPath(const std::string &file_path) { | |||
| // Determine file path length | |||
| if (file_path.size() >= MMPA_MAX_PATH) { | |||
| GELOGE(FAILED, "[Check][FilePath]Failed, file path's length:%zu > mmpa_max_path:%d", | |||
| @@ -271,8 +271,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status FileSaver::CheckPath(con | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status | |||
| FileSaver::SaveToFile(const string &file_path, const ge::ModelData &model, const ModelFileHeader *model_file_header) { | |||
| Status FileSaver::SaveToFile(const string &file_path, const ge::ModelData &model, | |||
| const ModelFileHeader *model_file_header) { | |||
| if (file_path.empty() || model.model_data == nullptr || model.model_len == 0) { | |||
| GELOGE(FAILED, "[Save][File]Incorrect input param, " | |||
| "file_path is empty or model_data is nullptr or model_len is 0"); | |||
| @@ -301,19 +301,18 @@ FileSaver::SaveToFile(const string &file_path, const ge::ModelData &model, const | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status | |||
| FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header, ModelPartitionTable &model_partition_table, | |||
| const std::vector<ModelPartition> &partition_datas) { | |||
| Status FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header, | |||
| ModelPartitionTable &model_partition_table, | |||
| const std::vector<ModelPartition> &partition_datas) { | |||
| const Status ret = SaveWithFileHeader(file_path, file_header, model_partition_table, partition_datas); | |||
| GE_CHK_BOOL_RET_STATUS(ret == SUCCESS, FAILED, "save file failed, file_path:%s, file header len:%u.", | |||
| file_path.c_str(), file_header.length); | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status | |||
| FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header, | |||
| vector<ModelPartitionTable *> &model_partition_tables, | |||
| const vector<vector<ModelPartition>> &all_partition_datas) { | |||
| Status FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header, | |||
| vector<ModelPartitionTable *> &model_partition_tables, | |||
| const vector<vector<ModelPartition>> &all_partition_datas) { | |||
| const Status ret = SaveWithFileHeader(file_path, file_header, model_partition_tables, all_partition_datas); | |||
| GE_CHK_BOOL_RET_STATUS(ret == SUCCESS, FAILED, "save file failed, file_path:%s, file header len:%u.", | |||
| file_path.c_str(), file_header.length); | |||
| @@ -372,8 +371,7 @@ Status FileSaver::SaveWithFileHeader(const std::string &file_path, const ModelFi | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status FileSaver::SaveToFile(const string &file_path, const void *data, | |||
| int len) { | |||
| Status FileSaver::SaveToFile(const string &file_path, const void *data, int len) { | |||
| if (data == nullptr || len <= 0) { | |||
| GELOGE(FAILED, "[Check][Param]Failed, model_data is null or the " | |||
| "length[%d] is less than 1.", len); | |||
| @@ -20,8 +20,8 @@ | |||
| #include <algorithm> | |||
| #include <string> | |||
| #include "debug/ge_log.h" | |||
| #include "ge_error_codes.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "external/ge/ge_error_codes.h" | |||
| namespace ge { | |||
| namespace { | |||
| @@ -14,7 +14,7 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "graph/common/bcast.h" | |||
| #include "common/bcast.h" | |||
| #include <vector> | |||
| @@ -18,7 +18,7 @@ | |||
| using ge::OmgContext; | |||
| namespace domi { | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OmgContext &GetContext() { | |||
| OmgContext &GetContext() { | |||
| static OmgContext context; | |||
| return context; | |||
| } | |||
| @@ -21,7 +21,7 @@ | |||
| namespace ge { | |||
| class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY CustAICPUKernelStore : public KernelStore { | |||
| class CustAICPUKernelStore : public KernelStore { | |||
| public: | |||
| CustAICPUKernelStore(); | |||
| ~CustAICPUKernelStore() {} | |||
| @@ -30,13 +30,12 @@ const int kInvalidFd = (-1); | |||
| } // namespace | |||
| namespace ge { | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY MemoryDumper::MemoryDumper() : fd_(kInvalidFd) {} | |||
| MemoryDumper::MemoryDumper() : fd_(kInvalidFd) {} | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY MemoryDumper::~MemoryDumper() { Close(); } | |||
| MemoryDumper::~MemoryDumper() { Close(); } | |||
| // Dump the data to the file | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::DumpToFile(const char *filename, void *data, | |||
| int64_t len) { | |||
| Status MemoryDumper::DumpToFile(const char *filename, void *data, int64_t len) { | |||
| #ifdef FMK_SUPPORT_DUMP | |||
| GE_CHECK_NOTNULL(filename); | |||
| GE_CHECK_NOTNULL(data); | |||
| @@ -81,7 +80,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::DumpToFile | |||
| } | |||
| // Open file | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::Open(const char *filename) { | |||
| Status MemoryDumper::Open(const char *filename) { | |||
| GE_CHK_BOOL_RET_STATUS(filename != nullptr, FAILED, "Incorrect parameter. filename is nullptr"); | |||
| // Try to remove the file first to reduce close time when overwriting | |||
| @@ -104,7 +103,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::Open(const | |||
| } | |||
| // Dump the data to file | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::Dump(void *data, uint32_t len) const { | |||
| Status MemoryDumper::Dump(void *data, uint32_t len) const { | |||
| GE_CHK_BOOL_RET_STATUS(data != nullptr, FAILED, "Incorrect parameter. data is nullptr"); | |||
| #ifdef FMK_SUPPORT_DUMP | |||
| @@ -15,6 +15,7 @@ | |||
| */ | |||
| #include "common/dump/dump_manager.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "framework/common/debug/log.h" | |||
| @@ -26,14 +27,14 @@ const uint64_t kInferSessionId = 0; | |||
| const uint32_t kAllOverflow = 3; | |||
| } // namespace | |||
| namespace ge { | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpManager &DumpManager::GetInstance() { | |||
| DumpManager &DumpManager::GetInstance() { | |||
| static DumpManager instance; | |||
| return instance; | |||
| } | |||
| bool DumpManager::NeedDoDump(const DumpConfig &dump_config, DumpProperties &dump_properties) { | |||
| if (dump_config.dump_status.empty() && dump_config.dump_debug.empty()) { | |||
| dump_properties_map_.emplace(kInferSessionId, dump_properties); | |||
| dump_properties_map_[kInferSessionId] = dump_properties; | |||
| GELOGI("Dump does not open"); | |||
| return false; | |||
| } | |||
| @@ -41,7 +42,7 @@ bool DumpManager::NeedDoDump(const DumpConfig &dump_config, DumpProperties &dump | |||
| if ((dump_config.dump_status == kDumpoff || dump_config.dump_status == kDumpOFF) && | |||
| dump_config.dump_debug == kDumpoff) { | |||
| dump_properties.ClearDumpPropertyValue(); | |||
| dump_properties_map_.emplace(kInferSessionId, dump_properties); | |||
| dump_properties_map_[kInferSessionId] = dump_properties; | |||
| return false; | |||
| } | |||
| if (dump_config.dump_status == kDumpOn && dump_config.dump_debug == kDumpOn) { | |||
| @@ -74,7 +75,7 @@ void DumpManager::SetDumpList(const DumpConfig &dump_config, DumpProperties &dum | |||
| Status DumpManager::SetNormalDumpConf(const DumpConfig &dump_config, DumpProperties &dump_properties) { | |||
| if (dump_config.dump_status == kDumpOn) { | |||
| GELOGI("Only do normal dump process, dump status is %s.", dump_config.dump_status.c_str()); | |||
| GELOGI("Only do normal dump process, dump status is %s", dump_config.dump_status.c_str()); | |||
| dump_properties.SetDumpStatus(dump_config.dump_status); | |||
| std::string dump_op_switch = dump_config.dump_op_switch; | |||
| dump_properties.SetDumpOpSwitch(dump_op_switch); | |||
| @@ -104,8 +105,8 @@ Status DumpManager::SetNormalDumpConf(const DumpConfig &dump_config, DumpPropert | |||
| Status DumpManager::SetDumpPath(const DumpConfig &dump_config, DumpProperties &dump_properties) { | |||
| std::string dump_path = dump_config.dump_path; | |||
| if (dump_path.empty()) { | |||
| GELOGE(PARAM_INVALID, "[Check][DumpPath]It is empty"); | |||
| REPORT_INNER_ERROR("E19999", "Dump path check is empty"); | |||
| GELOGE(PARAM_INVALID, "[Check][DumpPath]It is empty."); | |||
| REPORT_INNER_ERROR("E19999", "Dump path check is empty."); | |||
| return PARAM_INVALID; | |||
| } | |||
| if (dump_path[dump_path.size() - 1] != '/') { | |||
| @@ -117,7 +118,7 @@ Status DumpManager::SetDumpPath(const DumpConfig &dump_config, DumpProperties &d | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpManager::SetDumpConf(const DumpConfig &dump_config) { | |||
| Status DumpManager::SetDumpConf(const DumpConfig &dump_config) { | |||
| DumpProperties dump_properties; | |||
| if (!NeedDoDump(dump_config, dump_properties)) { | |||
| GELOGD("No need do dump process."); | |||
| @@ -131,8 +132,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpManager::SetDumpConf | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const DumpProperties &DumpManager::GetDumpProperties( | |||
| uint64_t session_id) { | |||
| const DumpProperties &DumpManager::GetDumpProperties(uint64_t session_id) { | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| auto iter = dump_properties_map_.find(session_id); | |||
| if (iter != dump_properties_map_.end()) { | |||
| @@ -142,13 +142,12 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const DumpProperties &DumpManag | |||
| return default_properties; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpManager::AddDumpProperties( | |||
| uint64_t session_id, const DumpProperties &dump_properties) { | |||
| void DumpManager::AddDumpProperties(uint64_t session_id, const DumpProperties &dump_properties) { | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| dump_properties_map_.emplace(session_id, dump_properties); | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpManager::RemoveDumpProperties(uint64_t session_id) { | |||
| void DumpManager::RemoveDumpProperties(uint64_t session_id) { | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| auto iter = dump_properties_map_.find(session_id); | |||
| if (iter != dump_properties_map_.end()) { | |||
| @@ -20,7 +20,7 @@ | |||
| #include <mutex> | |||
| #include "common/dump/dump_properties.h" | |||
| #include "common/ge_types.h" | |||
| #include "framework/common/ge_types.h" | |||
| namespace ge { | |||
| class DumpManager { | |||
| @@ -19,7 +19,7 @@ | |||
| #include <string> | |||
| #include "common/ge_inner_error_codes.h" | |||
| #include "framework/common/ge_inner_error_codes.h" | |||
| #include "common/properties_manager.h" | |||
| #include "proto/op_mapping.pb.h" | |||
| #include "runtime/stream.h" | |||
| @@ -18,9 +18,10 @@ | |||
| #include <cstdio> | |||
| #include <string> | |||
| #include <regex> | |||
| #include "common/ge/ge_util.h" | |||
| #include "common/util.h" | |||
| #include "framework/common/util.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "framework/common/ge_types.h" | |||
| @@ -37,17 +38,186 @@ const uint32_t kAtomicOverflow = (0x1 << 1); | |||
| const uint32_t kAllOverflow = (kAicoreOverflow | kAtomicOverflow); | |||
| } // namespace | |||
| namespace ge { | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties::DumpProperties(const DumpProperties &other) { | |||
| void DumpProperties::Split(const std::string &s, std::vector<std::string> &result, const char *delchar) { | |||
| if (s.empty()) { | |||
| return; | |||
| } | |||
| result.clear(); | |||
| char *buffer = new (std::nothrow)char[s.size() + 1]; | |||
| if (buffer == nullptr) { | |||
| GELOGE(FAILED, "[Split][string] failed while malloc memory, string value is:%s", s.c_str()); | |||
| REPORT_CALL_ERROR("E19999", "Memory malloc may fail when split string, get fatal exception, " | |||
| "string value is:%s", s.c_str()); | |||
| return; | |||
| } | |||
| buffer[s.size()] = '\0'; | |||
| errno_t e = strcpy_s(buffer, s.size() + 1, s.c_str()); | |||
| if (e != EOK) { | |||
| delete[] buffer; | |||
| return; | |||
| } | |||
| char *context = nullptr; | |||
| char *p = strtok_s(buffer, delchar, &context); | |||
| while (p != nullptr) { | |||
| result.emplace_back(p); | |||
| p = strtok_s(nullptr, delchar, &context); | |||
| } | |||
| delete[] buffer; | |||
| } | |||
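| // Usage note (added for clarity, not in the original source): Split tokenizes with strtok_s, so | |||
| // consecutive delimiters never produce empty tokens. For example, Split("0|5|10-20", result, "|") | |||
| // leaves result holding {"0", "5", "10-20"}, and Split("0||5", result, "|") yields {"0", "5"}. | |||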
| Status DumpProperties::CheckDumpStep(const std::string &dump_step) { | |||
| std::string modified_dump_step = dump_step + "|"; | |||
| std::smatch result; | |||
| std::vector<string> match_vecs; | |||
| std::regex pattern(R"((\d{1,}-\d{1,}\||\d{1,}\|)+)"); | |||
| if (regex_match(modified_dump_step, result, pattern)) { | |||
| Split(result.str(), match_vecs, "|"); | |||
| if (match_vecs.empty()) { | |||
| REPORT_CALL_ERROR("E19999", "Split may get fatal exception, dump_step:%s.", dump_step.c_str()); | |||
| GELOGE(FAILED, "[Check][Param] failed. Split may get fatal exception, ge.exec.dumpStep:%s.", dump_step.c_str()); | |||
| return FAILED; | |||
| } | |||
| // 100 is the max sets of dump steps. | |||
| if (match_vecs.size() > 100) { | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.dumpStep", | |||
| dump_step.c_str(), | |||
| " is not supported, only support dump <= 100 sets of data"})); | |||
| GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, " | |||
| "dump_step only support dump <= 100 sets of data.", dump_step.c_str()); | |||
| return PARAM_INVALID; | |||
| } | |||
| for (const auto &match_vec : match_vecs) { | |||
| std::vector<string> vec_after_split; | |||
| Split(match_vec, vec_after_split, "-"); | |||
| if (vec_after_split.empty()) { | |||
| REPORT_CALL_ERROR("E19999", "Split may get fatal exception."); | |||
| GELOGE(FAILED, "[Check][Param] failed, split may get fatal exception."); | |||
| return FAILED; | |||
| } | |||
| if (vec_after_split.size() > 1) { | |||
| if (std::atoi(vec_after_split[0].c_str()) >= std::atoi(vec_after_split[1].c_str())) { | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.dumpStep", | |||
| dump_step.c_str(), | |||
| " is not supported." | |||
| "in range steps, the first step is >= second step, correct example:'0|5|10-20"})); | |||
| GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, " | |||
| "in range steps, the first step is >= second step, correct example:'0|5|10-20'", dump_step.c_str()); | |||
| return PARAM_INVALID; | |||
| } | |||
| } | |||
| } | |||
| } else { | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.dumpStep", | |||
| dump_step.c_str(), | |||
| " is not supported, correct example:'0|5|10|50-100."})); | |||
| GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, " | |||
| "dump_step string style is error, correct example:'0|5|10|50-100.'", dump_step.c_str()); | |||
| return PARAM_INVALID; | |||
| } | |||
| return SUCCESS; | |||
| } | |||
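| // Illustrative sketch (not part of the original file): a minimal standalone program that mirrors the | |||
| // regex acceptance check above, assuming only the C++ standard library. It shows which | |||
| // ge.exec.dumpStep strings pass the pattern before the per-range ordering check is applied. | |||
| // | |||
| // #include <iostream> | |||
| // #include <regex> | |||
| // #include <string> | |||
| // | |||
| // static bool DumpStepLooksValid(const std::string &dump_step) { | |||
| //   const std::string modified = dump_step + "|";                       // same trailing '|' trick as above | |||
| //   static const std::regex pattern(R"((\d{1,}-\d{1,}\||\d{1,}\|)+)");  // single steps "a|" or ranges "a-b|" | |||
| //   return std::regex_match(modified, pattern); | |||
| // } | |||
| // | |||
| // int main() { | |||
| //   std::cout << DumpStepLooksValid("0|5|10-20") << '\n';  // 1: a valid mix of single steps and a range | |||
| //   std::cout << DumpStepLooksValid("10-") << '\n';        // 0: an incomplete range is rejected | |||
| //   std::cout << DumpStepLooksValid("a|b") << '\n';        // 0: non-numeric steps are rejected | |||
| //   return 0; | |||
| // } | |||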
| Status DumpProperties::CheckDumpMode(const std::string &dump_mode) { | |||
| const std::set<string> dump_mode_list = {"input", "output", "all"}; | |||
| std::set<string>::iterator iter; | |||
| if ((iter = dump_mode_list.find(dump_mode)) == dump_mode_list.end()) { | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.dumpMode", | |||
| dump_mode.c_str(), | |||
| " is not supported, should be one of the following:[input, output, all]"})); | |||
| GELOGE(PARAM_INVALID, "[Check][Param] the dump_debug_mode:%s, is is not supported," | |||
| "should be one of the following:[input, output, all].", dump_mode.c_str()); | |||
| return PARAM_INVALID; | |||
| } | |||
| return SUCCESS; | |||
| } | |||
| Status DumpProperties::CheckDumpPath(const std::string &input) { | |||
| if (mmIsDir(input.c_str()) != EN_OK) { | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.dumpPath", | |||
| input.c_str(), | |||
| " is not a directory."})); | |||
| GELOGE(PARAM_INVALID, "[Check][Param] the path:%s, is not directory.", input.c_str()); | |||
| return PARAM_INVALID; | |||
| } | |||
| char trusted_path[MMPA_MAX_PATH] = { "\0" }; | |||
| if (mmRealPath(input.c_str(), trusted_path, MMPA_MAX_PATH) != EN_OK) { | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.dumpPath", | |||
| input.c_str(), | |||
| " dumpPath invalid."})); | |||
| GELOGE(PARAM_INVALID, "[Check][Param] the dumpPath:%s, is invalid.", input.c_str()); | |||
| return PARAM_INVALID; | |||
| } | |||
| if (mmAccess2(trusted_path, M_R_OK | M_W_OK) != EN_OK) { | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.dumpPath", | |||
| input.c_str(), | |||
| " does't have read, write permissions."})); | |||
| GELOGE(PARAM_INVALID, "[Check][Param] the path:%s, does't have read, write permissions.", input.c_str()); | |||
| return PARAM_INVALID; | |||
| } | |||
| return SUCCESS; | |||
| } | |||
| Status DumpProperties::CheckEnableDump(const std::string &input) { | |||
| std::set<string> enable_dump_option_list = {"1", "0"}; | |||
| auto it = enable_dump_option_list.find(input); | |||
| if (it == enable_dump_option_list.end()) { | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.enableDump", | |||
| input.c_str(), | |||
| " only support 1 or 0."})); | |||
| GELOGE(PARAM_INVALID, "[Check][Param] Not support ge.exec.enableDump or ge.exec.enableDumpDebug format:%s, " | |||
| "only support 1 or 0.", input.c_str()); | |||
| return PARAM_INVALID; | |||
| } | |||
| return SUCCESS; | |||
| } | |||
| DumpProperties::DumpProperties(const DumpProperties &other) { | |||
| CopyFrom(other); | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties &DumpProperties::operator=( | |||
| const DumpProperties &other) { | |||
| DumpProperties &DumpProperties::operator=(const DumpProperties &other) { | |||
| CopyFrom(other); | |||
| return *this; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOptions() { | |||
| Status DumpProperties::SetDumpOptions() { | |||
| if (enable_dump_ == kEnableFlag) { | |||
| std::string dump_step; | |||
| if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS && !dump_step.empty()) { | |||
| GE_CHK_STATUS_RET(CheckDumpStep(dump_step), "[Check][dump_step] failed."); | |||
| GELOGI("Get dump step %s successfully", dump_step.c_str()); | |||
| SetDumpStep(dump_step); | |||
| } | |||
| string dump_mode = "output"; | |||
| if (GetContext().GetOption(OPTION_EXEC_DUMP_MODE, dump_mode) == GRAPH_SUCCESS) { | |||
| GELOGI("Get dump mode %s successfully", dump_mode.c_str()); | |||
| GE_CHK_STATUS_RET(CheckDumpMode(dump_mode), "[Check][dump_mode] failed."); | |||
| SetDumpMode(dump_mode); | |||
| } | |||
| AddPropertyValue(DUMP_ALL_MODEL, {}); | |||
| } | |||
| return SUCCESS; | |||
| } | |||
| Status DumpProperties::InitByOptions() { | |||
| enable_dump_.clear(); | |||
| enable_dump_debug_.clear(); | |||
| dump_path_.clear(); | |||
| @@ -57,17 +227,32 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOpti | |||
| is_infer_op_debug_ = false; | |||
| op_debug_mode_ = 0; | |||
| std::string enable_dump; | |||
| std::string enable_dump = std::to_string(false); | |||
| (void)GetContext().GetOption(OPTION_EXEC_ENABLE_DUMP, enable_dump); | |||
| enable_dump_ = enable_dump; | |||
| if (!enable_dump_.empty()) { | |||
| GE_CHK_STATUS_RET(CheckEnableDump(enable_dump_), "[Check][enable_dump] failed."); | |||
| } | |||
| std::string enable_dump_debug; | |||
| std::string enable_dump_debug = std::to_string(false); | |||
| (void)GetContext().GetOption(OPTION_EXEC_ENABLE_DUMP_DEBUG, enable_dump_debug); | |||
| enable_dump_debug_ = enable_dump_debug; | |||
| if (!enable_dump_debug_.empty()) { | |||
| GE_CHK_STATUS_RET(CheckEnableDump(enable_dump_debug_), "[Check][enable_dump_debug] failed."); | |||
| } | |||
| if ((enable_dump_ == kEnableFlag) && (enable_dump_debug_ == kEnableFlag)) { | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.enableDump and ge.exec.enableDumpDebug", | |||
| enable_dump_ + ", " + enable_dump_debug, | |||
| "ge.exec.enableDump and ge.exec.enableDumpDebug cannot be set to 1 at the same time."})); | |||
| GELOGE(FAILED, "ge.exec.enableDump and ge.exec.enableDumpDebug cannot be both set to 1 at the same time."); | |||
| return FAILED; | |||
| } | |||
| if ((enable_dump_ == kEnableFlag) || (enable_dump_debug_ == kEnableFlag)) { | |||
| std::string dump_path; | |||
| if (GetContext().GetOption(OPTION_EXEC_DUMP_PATH, dump_path) == GRAPH_SUCCESS) { | |||
| GE_CHK_STATUS_RET(CheckDumpPath(dump_path), "Check dump path failed."); | |||
| if (!dump_path.empty() && dump_path[dump_path.size() - 1] != '/') { | |||
| dump_path = dump_path + "/"; | |||
| } | |||
| @@ -75,30 +260,25 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOpti | |||
| GELOGI("Get dump path %s successfully", dump_path.c_str()); | |||
| SetDumpPath(dump_path); | |||
| } else { | |||
| GELOGW("Dump path is not set"); | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.dumpPath", | |||
| dump_path, | |||
| "ge.exec.dumpPath is not set."})); | |||
| GELOGE(FAILED, "[Check][dump_path] failed. Dump path is not set."); | |||
| return FAILED; | |||
| } | |||
| } | |||
| if (enable_dump_ == kEnableFlag) { | |||
| std::string dump_step; | |||
| if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS) { | |||
| GELOGI("Get dump step %s successfully", dump_step.c_str()); | |||
| SetDumpStep(dump_step); | |||
| } | |||
| string dump_mode; | |||
| if (GetContext().GetOption(OPTION_EXEC_DUMP_MODE, dump_mode) == GRAPH_SUCCESS) { | |||
| GELOGI("Get dump mode %s successfully", dump_mode.c_str()); | |||
| SetDumpMode(dump_mode); | |||
| } | |||
| AddPropertyValue(DUMP_ALL_MODEL, {}); | |||
| } | |||
| GE_CHK_STATUS_RET(SetDumpOptions(), "SetDumpOptions failed."); | |||
| GE_CHK_STATUS_RET(SetDumpDebugOptions(), "SetDumpDebugOptions failed."); | |||
| SetDumpDebugOptions(); | |||
| return SUCCESS; | |||
| } | |||
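| // Summary of the checks above (added for clarity, not in the original source), assuming | |||
| // ge.exec.enableDump / ge.exec.enableDumpDebug take the values "0" or "1": | |||
| //   enableDump=1, enableDumpDebug=1 -> FAILED, the two options are mutually exclusive | |||
| //   enableDump=1, enableDumpDebug=0 -> data dump; ge.exec.dumpPath must name a readable, writable directory | |||
| //   enableDump=0, enableDumpDebug=1 -> overflow (op debug) dump; ge.exec.dumpPath is required as well | |||
| //   enableDump=0, enableDumpDebug=0 -> dump stays disabled | |||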
| // The following is the new dump scenario of the fusion operator | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::AddPropertyValue( | |||
| const std::string &model, const std::set<std::string> &layers) { | |||
| void DumpProperties::AddPropertyValue(const std::string &model, const std::set<std::string> &layers) { | |||
| for (const std::string &layer : layers) { | |||
| GELOGI("This model %s config to dump layer %s", model.c_str(), layer.c_str()); | |||
| } | |||
| @@ -106,18 +286,18 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::AddPropert | |||
| model_dump_properties_map_[model] = layers; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::DeletePropertyValue(const std::string &model) { | |||
| void DumpProperties::DeletePropertyValue(const std::string &model) { | |||
| auto iter = model_dump_properties_map_.find(model); | |||
| if (iter != model_dump_properties_map_.end()) { | |||
| model_dump_properties_map_.erase(iter); | |||
| } | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::ClearDumpPropertyValue() { | |||
| void DumpProperties::ClearDumpPropertyValue() { | |||
| model_dump_properties_map_.clear(); | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::ClearDumpInfo() { | |||
| void DumpProperties::ClearDumpInfo() { | |||
| enable_dump_.clear(); | |||
| enable_dump_debug_.clear(); | |||
| dump_path_.clear(); | |||
| @@ -130,7 +310,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::ClearDumpI | |||
| op_debug_mode_ = 0; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set<std::string> DumpProperties::GetAllDumpModel() const { | |||
| std::set<std::string> DumpProperties::GetAllDumpModel() const { | |||
| std::set<std::string> model_list; | |||
| for (auto &iter : model_dump_properties_map_) { | |||
| model_list.insert(iter.first); | |||
| @@ -139,8 +319,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set<std::string> DumpPrope | |||
| return model_list; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set<std::string> DumpProperties::GetPropertyValue( | |||
| const std::string &model) const { | |||
| std::set<std::string> DumpProperties::GetPropertyValue(const std::string &model) const { | |||
| auto iter = model_dump_properties_map_.find(model); | |||
| if (iter != model_dump_properties_map_.end()) { | |||
| return iter->second; | |||
| @@ -148,8 +327,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set<std::string> DumpPrope | |||
| return {}; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool DumpProperties::IsLayerNeedDump( | |||
| const std::string &model, const std::string &om_name, const std::string &op_name) const { | |||
| bool DumpProperties::IsLayerNeedDump(const std::string &model, const std::string &om_name, | |||
| const std::string &op_name) const { | |||
| // if dump all | |||
| GELOGD("model name is %s om name is %s op is %s in layer need dump", model.c_str(), om_name.c_str(), op_name.c_str()); | |||
| if (model_dump_properties_map_.find(DUMP_ALL_MODEL) != model_dump_properties_map_.end()) { | |||
| @@ -169,67 +348,66 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool DumpProperties::IsLayerNee | |||
| return model_iter->second.find(op_name) != model_iter->second.end(); | |||
| } | |||
| GELOGD("Model %s is not seated to be dump.", model.c_str()); | |||
| GELOGD("Model %s is not seated to be dump", model.c_str()); | |||
| return false; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpPath(const std::string &path) { | |||
| void DumpProperties::SetDumpPath(const std::string &path) { | |||
| dump_path_ = path; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpPath() const { | |||
| const std::string &DumpProperties::GetDumpPath() const { | |||
| return dump_path_; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpStep(const std::string &step) { | |||
| void DumpProperties::SetDumpStep(const std::string &step) { | |||
| dump_step_ = step; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpStep() const { | |||
| const std::string &DumpProperties::GetDumpStep() const { | |||
| return dump_step_; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpMode(const std::string &mode) { | |||
| void DumpProperties::SetDumpMode(const std::string &mode) { | |||
| dump_mode_ = mode; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpMode() const { | |||
| const std::string &DumpProperties::GetDumpMode() const { | |||
| return dump_mode_; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpStatus(const std::string &status) { | |||
| void DumpProperties::SetDumpStatus(const std::string &status) { | |||
| dump_status_ = status; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpStatus() const { | |||
| const std::string &DumpProperties::GetDumpStatus() const { | |||
| return dump_status_; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitInferOpDebug() { | |||
| void DumpProperties::InitInferOpDebug() { | |||
| is_infer_op_debug_ = true; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetOpDebugMode(const uint32_t &op_debug_mode) { | |||
| void DumpProperties::SetOpDebugMode(const uint32_t &op_debug_mode) { | |||
| op_debug_mode_ = op_debug_mode; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpOpSwitch( | |||
| const std::string &dump_op_switch) { | |||
| void DumpProperties::SetDumpOpSwitch(const std::string &dump_op_switch) { | |||
| dump_op_switch_ = dump_op_switch; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpOpSwitch() const { | |||
| const std::string &DumpProperties::GetDumpOpSwitch() const { | |||
| return dump_op_switch_; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool DumpProperties::IsSingleOpNeedDump() const { | |||
| bool DumpProperties::IsSingleOpNeedDump() const { | |||
| if (dump_op_switch_ == kDumpStatusOpen) { | |||
| return true; | |||
| } | |||
| return false; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool DumpProperties::IsDumpOpen() const { | |||
| bool DumpProperties::IsDumpOpen() const { | |||
| if (enable_dump_ == kEnableFlag || dump_status_ == kDumpStatusOpen) { | |||
| return true; | |||
| } | |||
| @@ -253,14 +431,14 @@ void DumpProperties::CopyFrom(const DumpProperties &other) { | |||
| } | |||
| } | |||
| void DumpProperties::SetDumpDebugOptions() { | |||
| Status DumpProperties::SetDumpDebugOptions() { | |||
| if (enable_dump_debug_ == kEnableFlag) { | |||
| std::string dump_debug_mode; | |||
| if (GetContext().GetOption(OPTION_EXEC_DUMP_DEBUG_MODE, dump_debug_mode) == GRAPH_SUCCESS) { | |||
| GELOGD("Get dump debug mode %s successfully", dump_debug_mode.c_str()); | |||
| GELOGD("Get ge.exec.dumpDebugMode %s successfully.", dump_debug_mode.c_str()); | |||
| } else { | |||
| GELOGW("Dump debug mode is not set."); | |||
| return; | |||
| GELOGW("ge.exec.dumpDebugMode is not set."); | |||
| return SUCCESS; | |||
| } | |||
| if (dump_debug_mode == OP_DEBUG_AICORE) { | |||
| @@ -276,10 +454,17 @@ void DumpProperties::SetDumpDebugOptions() { | |||
| is_train_op_debug_ = true; | |||
| op_debug_mode_ = kAllOverflow; | |||
| } else { | |||
| GELOGW("ge.exec.dumpDebugMode is invalid."); | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), | |||
| std::vector<std::string>({ | |||
| "ge.exec.dumpDebugMode", | |||
| dump_debug_mode, | |||
| "ge.exec.dumpDebugMode is invalid."})); | |||
| GELOGE(PARAM_INVALID, "[Set][DumpDebugOptions] failed, ge.exec.dumpDebugMode is invalid."); | |||
| return PARAM_INVALID; | |||
| } | |||
| } else { | |||
| GELOGI("ge.exec.enableDumpDebug is false or is not set."); | |||
| GELOGI("ge.exec.enableDumpDebug is false or is not set"); | |||
| } | |||
| return SUCCESS; | |||
| } | |||
| } // namespace ge | |||
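SetDumpDebugOptions (and, per the header change below, InitByOptions) now returns Status instead of void, so an invalid ge.exec.dumpDebugMode can fail the caller instead of only being logged. A minimal call-site sketch, assuming the usual GE Status/SUCCESS/GELOGE conventions; the wrapper function and include paths are hypothetical:

```cpp
#include "common/dump/dump_properties.h"     // include path assumed
#include "framework/common/debug/ge_log.h"

// Hypothetical call site illustrating the new error propagation.
ge::Status InitDump(ge::DumpProperties &props) {
  const ge::Status ret = props.InitByOptions();   // previously returned void
  if (ret != ge::SUCCESS) {
    GELOGE(ret, "[Init][DumpProperties] failed, dump options are invalid.");
    return ret;
  }
  return ge::SUCCESS;
}
```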
| @@ -23,6 +23,7 @@ | |||
| #include <vector> | |||
| namespace ge { | |||
| using Status = uint32_t; | |||
| class DumpProperties { | |||
| public: | |||
| DumpProperties() = default; | |||
| @@ -33,7 +34,7 @@ class DumpProperties { | |||
| DumpProperties &operator=(const DumpProperties &dump); | |||
| void InitByOptions(); | |||
| Status InitByOptions(); | |||
| void AddPropertyValue(const std::string &model, const std::set<std::string> &layers); | |||
| @@ -95,7 +96,20 @@ class DumpProperties { | |||
| private: | |||
| void CopyFrom(const DumpProperties &other); | |||
| void SetDumpDebugOptions(); | |||
| Status SetDumpDebugOptions(); | |||
| Status SetDumpOptions(); | |||
| void Split(const std::string &s, std::vector<std::string> &result, const char *delchar); | |||
| Status CheckDumpStep(const std::string &dump_step); | |||
| Status CheckDumpMode(const std::string &dump_mode); | |||
| Status CheckDumpPath(const std::string &input); | |||
| Status CheckEnableDump(const std::string &input); | |||
| std::string enable_dump_; | |||
| std::string enable_dump_debug_; | |||
| @@ -161,6 +161,7 @@ Status ExceptionDumper::DumpExceptionInfo(const std::vector<rtExceptionInfo> &ex | |||
| uint64_t proto_size = dump_data.ByteSizeLong(); | |||
| std::unique_ptr<char[]> proto_msg(new (std::nothrow) char[proto_size]); | |||
| GE_CHECK_NOTNULL(proto_msg); | |||
| bool ret = dump_data.SerializeToArray(proto_msg.get(), proto_size); | |||
| if (!ret || proto_size == 0) { | |||
| REPORT_INNER_ERROR("E19999", "Serialize proto to string fail"); | |||
| @@ -14,7 +14,7 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "opdebug_register.h" | |||
| #include "common/dump/opdebug_register.h" | |||
| namespace { | |||
| const size_t kOpDebugMemorySize = 2048UL; | |||
| @@ -18,8 +18,8 @@ | |||
| #define GE_COMMON_DUMP_OPDEBUG_REGISTER_H_ | |||
| #include <map> | |||
| #include "common/debug/ge_log.h" | |||
| #include "common/debug/log.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "graph/load/model_manager/data_dumper.h" | |||
| namespace ge { | |||
| @@ -0,0 +1,89 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef GE_COMMON_EXECUTOR_H | |||
| #define GE_COMMON_EXECUTOR_H | |||
| #include "external/ge/ge_api_types.h" | |||
| #include "graph/ge_local_context.h" | |||
| #include "graph/manager/graph_manager_utils.h" | |||
| namespace ge { | |||
| struct RunArgs { | |||
| GraphNodePtr graph_node; | |||
| GraphId graph_id; | |||
| uint64_t session_id; | |||
| struct error_message::Context error_context; | |||
| std::vector<ge::Tensor> input_tensor; | |||
| GeRootModelPtr ge_root_model; | |||
| GEThreadLocalContext context; | |||
| RunAsyncCallback callback; | |||
| }; | |||
| class Executor { | |||
| public: | |||
| /// | |||
| /// @ingroup ge | |||
| /// @brief Load model from graph. | |||
| /// @param [in] GeRootModel: root model of the compiled graph. | |||
| /// @param [in] GraphNode: node of graph. | |||
| /// @return Status result of function | |||
| /// | |||
| virtual Status LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) = 0; | |||
| /// | |||
| /// @ingroup ge | |||
| /// @brief Unload model. | |||
| /// @param [in] GeRootModel: root model of the compiled graph. | |||
| /// @param [in] graph_id: graph identifier. | |||
| /// @return Status result of function | |||
| /// | |||
| virtual Status UnloadGraph(const GeRootModelPtr &ge_root_model, uint32_t graph_id) = 0; | |||
| /// | |||
| /// @ingroup ge | |||
| /// @brief Push model execution params to queue. | |||
| /// @param [in] args: RunArgs for model execution. | |||
| /// @return Status result of function | |||
| /// | |||
| virtual Status PushGraph(const RunArgs &args) = 0; | |||
| /// | |||
| /// @ingroup ge | |||
| /// @brief Run graph for synchronous model. | |||
| /// @param [in] graph_node: node of graph. | |||
| /// @param [in] graph_id: graph identifier. | |||
| /// @param [in] inputs: input data for the graph running. | |||
| /// @param [out] outputs: output data of the graph running | |||
| /// @return Status result of function | |||
| /// | |||
| virtual Status RunGraph(const GraphNodePtr &graph_node, GraphId graph_id, | |||
| const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) = 0; | |||
| /// | |||
| /// @ingroup ge | |||
| /// @brief Run graph for NN synchronous model. | |||
| /// @param [in] graph_node: node of graph. | |||
| /// @param [in] graph_id: graph identifier. | |||
| /// @param [in] stream: Stream for model running. | |||
| /// @param [in] inputs: input data for the graph running. | |||
| /// @param [out] outputs: output data of the graph running | |||
| /// @return Status result of function | |||
| /// | |||
| virtual Status RunGraphWithStream(const GraphNodePtr &graph_node, GraphId graph_id, rtStream_t stream, | |||
| const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) = 0; | |||
| }; | |||
| } // namespace ge | |||
| #endif // GE_COMMON_EXECUTOR_H | |||
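The new Executor pure-virtual interface decouples graph load/run/unload from GraphManager so different execution backends can be plugged in. A no-op sketch of a conforming implementation, purely illustrative (the class name and include path are assumptions; the real executors live elsewhere in GE):

```cpp
#include <vector>
#include "common/executor.h"   // header introduced above; include path assumed

namespace ge {
// Illustrative stub that satisfies every pure-virtual method with a no-op.
class StubExecutor : public Executor {
 public:
  Status LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) override {
    (void)ge_root_model; (void)graph_node;
    return SUCCESS;
  }
  Status UnloadGraph(const GeRootModelPtr &ge_root_model, uint32_t graph_id) override {
    (void)ge_root_model; (void)graph_id;
    return SUCCESS;
  }
  Status PushGraph(const RunArgs &args) override {
    (void)args;
    return SUCCESS;
  }
  Status RunGraph(const GraphNodePtr &graph_node, GraphId graph_id,
                  const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) override {
    (void)graph_node; (void)graph_id; (void)inputs; (void)outputs;
    return SUCCESS;
  }
  Status RunGraphWithStream(const GraphNodePtr &graph_node, GraphId graph_id, rtStream_t stream,
                            const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) override {
    (void)graph_node; (void)graph_id; (void)stream; (void)inputs; (void)outputs;
    return SUCCESS;
  }
};
}  // namespace ge
```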
| @@ -17,19 +17,18 @@ | |||
| #include "framework/common/fmk_error_codes.h" | |||
| namespace domi { | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY StatusFactory *StatusFactory::Instance() { | |||
| StatusFactory *StatusFactory::Instance() { | |||
| static StatusFactory instance; | |||
| return &instance; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void StatusFactory::RegisterErrorNo(uint32_t err, | |||
| const std::string &desc) { | |||
| void StatusFactory::RegisterErrorNo(uint32_t err, const std::string &desc) { | |||
| if (err_desc_.find(err) != err_desc_.end()) { | |||
| return; | |||
| } | |||
| err_desc_[err] = desc; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string StatusFactory::GetErrDesc(uint32_t err) { | |||
| std::string StatusFactory::GetErrDesc(uint32_t err) { | |||
| auto iter_find = err_desc_.find(err); | |||
| if (iter_find == err_desc_.end()) { | |||
| return ""; | |||
| @@ -123,6 +123,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size | |||
| auto protected_size = total_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? total_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
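The GE_CHECK_GE(protected_size, 0) added here (and in each similar hunk that follows) protects the destination-size argument of memcpy_s/memset_s: protected_size is the remaining destination space clamped to SECUREC_MEM_MAX_LEN, and if an offset error ever made it negative, the later static_cast<size_t> would turn it into an enormous bound. A self-contained sketch of the pattern with illustrative names and a plain 0/1 return (the real hunks log and return their own error codes):

```cpp
#include <securec.h>
#include <cstdint>

// Illustrative guarded copy; GuardedCopy and its plain return codes are not part of the patch.
uint32_t GuardedCopy(uint8_t *dst, int64_t total_size, int64_t dst_offset,
                     const uint8_t *src, int64_t src_offset, int64_t size) {
  int64_t remaining = total_size - dst_offset;
  int64_t protected_size = remaining < static_cast<int64_t>(SECUREC_MEM_MAX_LEN)
                               ? remaining
                               : static_cast<int64_t>(SECUREC_MEM_MAX_LEN);
  if (protected_size < 0) {   // what GE_CHECK_GE(protected_size, 0) enforces
    return 1U;                // reject a negative bound before it is cast to size_t
  }
  errno_t ret = memcpy_s(dst + dst_offset, static_cast<size_t>(protected_size),
                         src + src_offset, static_cast<size_t>(size));
  return (ret == EOK) ? 0U : 1U;
}
```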
| @@ -59,7 +59,7 @@ bool CheckShape(Format format, const ShapeVector &shape) { | |||
| return CheckShapeValid(shape, kDimSize4D); | |||
| default: | |||
| std::string error = "Trans format between " + FmtToStr(TypeUtils::FormatToSerialString(format)) + | |||
| " and FORMAT_FRACTAL_NZ is not supported."; | |||
| " and FORMAT_FRACTAL_NZ is not supported."; | |||
| GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); | |||
| return false; | |||
| } | |||
| @@ -185,6 +185,7 @@ Status TransFormatFromNdToFracNz(const TransArgs &args, TransResult &result, con | |||
| auto src_offset = (src_h_head + w1_idx * w0) * size; | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) ? | |||
| dst_size - dst_offset : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size * w0)); | |||
| if (ret != EOK) { | |||
| @@ -202,6 +203,7 @@ Status TransFormatFromNdToFracNz(const TransArgs &args, TransResult &result, con | |||
| auto src_offset = (src_h_head + src_w_idx) * size; | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) ? | |||
| dst_size - dst_offset : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
| @@ -267,6 +269,7 @@ Status TransFormatFromFracNzToNd(const TransArgs &args, TransResult &result, con | |||
| auto dst_offset = (dst_h_head + w1_idx * w0) * size; | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) ? | |||
| dst_size - dst_offset : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size * w0)); | |||
| if (ret != EOK) { | |||
| @@ -285,6 +288,7 @@ Status TransFormatFromFracNzToNd(const TransArgs &args, TransResult &result, con | |||
| auto dst_offset = (dst_h_head + dst_w_idx) * size; | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) ? | |||
| dst_size - dst_offset : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
| @@ -19,7 +19,7 @@ | |||
| #include <securec.h> | |||
| #include <memory> | |||
| #include "common/debug/log.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "common/formats/utils/formats_definitions.h" | |||
| #include "common/formats/utils/formats_trans_utils.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| @@ -226,6 +226,7 @@ Status TransFormatFromNchwToFz(const TransArgs &args, TransResult &result) { | |||
| auto protected_size = dst_size - offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? dst_size - offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| errno_t ret = EOK; | |||
| if (need_pad_zero) { | |||
| ret = memset_s(dst.get() + offset, static_cast<size_t>(protected_size), 0, static_cast<size_t>(size)); | |||
| @@ -390,6 +391,7 @@ Status TransFormatHwcnToFz(const TransArgs &args, TransResult &result) { | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? dst_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto pad_zero = ((c1i * c0 + c0i) >= c) || (n1n0i >= n); | |||
| errno_t ret = EOK; | |||
| if (pad_zero) { | |||
| @@ -474,6 +476,7 @@ Status TransFormatNhwcToFz(const TransArgs &args, TransResult &result) { | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? dst_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto pad_zero = ((c1i * c0 + c0i) >= c) || (n1n0i >= n); | |||
| errno_t ret = EOK; | |||
| if (pad_zero) { | |||
| @@ -193,6 +193,7 @@ Status TransFormatFromNdToFracZz(const TransArgs &args, TransResult &result, con | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? dst_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size * w0)); | |||
| if (ret != EOK) { | |||
| @@ -213,6 +214,7 @@ Status TransFormatFromNdToFracZz(const TransArgs &args, TransResult &result, con | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? dst_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
| @@ -284,6 +286,7 @@ Status TransFormatFromFracZzToNd(const TransArgs &args, TransResult &result, con | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? dst_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size * w0)); | |||
| if (ret != EOK) { | |||
| @@ -304,6 +307,7 @@ Status TransFormatFromFracZzToNd(const TransArgs &args, TransResult &result, con | |||
| auto protected_size = dst_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? dst_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
| @@ -17,6 +17,7 @@ | |||
| #include "common/formats/format_transfers/format_transfer_fracz_hwcn.h" | |||
| #include <securec.h> | |||
| #include <memory> | |||
| #include "common/formats/utils/formats_definitions.h" | |||
| @@ -35,8 +36,8 @@ Status CheckArgsForFracZToHwcn(const TransArgs &args) { | |||
| auto dst_shape = args.dst_shape; | |||
| if (args.src_format != FORMAT_FRACTAL_Z || args.dst_format != FORMAT_HWCN) { | |||
| std::string error = "Dose not support trans format from " + | |||
| FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + | |||
| FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); | |||
| FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + | |||
| FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); | |||
| GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); | |||
| return ACL_ERROR_GE_FORMAT_INVALID; | |||
| } | |||
| @@ -52,15 +53,13 @@ Status CheckArgsForFracZToHwcn(const TransArgs &args) { | |||
| if (!CheckShapeValid(src_shape, kFracZDimsNum)) { | |||
| GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "[Check][Shape]Value is invalid, src shape %s", | |||
| ShapeToString(src_shape).c_str()); | |||
| REPORT_CALL_ERROR("E19999", "Src shape %s check invalid", | |||
| ShapeToString(src_shape).c_str()); | |||
| REPORT_CALL_ERROR("E19999", "Src shape %s check invalid", ShapeToString(src_shape).c_str()); | |||
| return ACL_ERROR_GE_SHAPE_INVALID; | |||
| } | |||
| if (!CheckShapeValid(dst_shape, kHwcnDimsNum)) { | |||
| GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "[Check][Shape]Value is invalid, dst shape %s", | |||
| ShapeToString(dst_shape).c_str()); | |||
| REPORT_CALL_ERROR("E19999", "Dst shape %s check invalid", | |||
| ShapeToString(dst_shape).c_str()); | |||
| REPORT_CALL_ERROR("E19999", "Dst shape %s check invalid", ShapeToString(dst_shape).c_str()); | |||
| return ACL_ERROR_GE_SHAPE_INVALID; | |||
| } | |||
| int64_t c0 = GetCubeSizeByDataType(args.src_data_type); | |||
| @@ -71,9 +70,8 @@ Status CheckArgsForFracZToHwcn(const TransArgs &args) { | |||
| int64_t n0 = Ceil(dst_shape.at(kHwcnN), static_cast<int64_t>(kNiSize)); | |||
| if (src_shape.at(kFracZHWC1) != dst_shape.at(kHwcnH) * dst_shape.at(kHwcnW) * c1 || src_shape.at(kFracZC0) != c0 || | |||
| src_shape.at(kFracZNi) != kNiSize || src_shape.at(kFracZN0) != n0) { | |||
| std::string error = "Failed to check relationship between src shape" + | |||
| FmtToStr(ShapeToString(src_shape)) + " and dst shape" + | |||
| FmtToStr(ShapeToString(dst_shape)); | |||
| std::string error = "Failed to check relationship between src shape" + FmtToStr(ShapeToString(src_shape)) + | |||
| " and dst shape" + FmtToStr(ShapeToString(dst_shape)); | |||
| GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_SHAPE_INVALID, error.c_str()); | |||
| return ACL_ERROR_GE_SHAPE_INVALID; | |||
| } | |||
| @@ -128,6 +126,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in | |||
| auto dst_offset = dst_idx * size; | |||
| auto protected_size = total_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) ? | |||
| total_size - dst_offset : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
| @@ -130,6 +130,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in | |||
| auto dst_offset = dst_idx * size; | |||
| auto protected_size = total_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) ? | |||
| total_size - dst_offset : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
| @@ -128,6 +128,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size | |||
| auto dst_offset = dst_idx * size; | |||
| auto protected_size = total_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) ? | |||
| total_size - dst_offset : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
| @@ -149,6 +149,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in | |||
| auto protected_size = total_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? total_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| int64_t c_idx = c0_idx + c1_idx * c0; | |||
| int64_t src_idx = h_idx * wcn + w_idx * cn + c_idx * n + n_idx; | |||
| auto src_offset = src_idx * size; | |||
| @@ -129,6 +129,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in | |||
| auto protected_size = total_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? total_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
| @@ -129,6 +129,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in | |||
| auto protected_size = total_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? total_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset, static_cast<size_t>(protected_size), args.data + src_offset, | |||
| static_cast<size_t>(size)); | |||
| if (ret != EOK) { | |||
| @@ -23,7 +23,7 @@ | |||
| #include "common/formats/utils/formats_definitions.h" | |||
| #include "common/formats/utils/formats_trans_utils.h" | |||
| #include "common/util.h" | |||
| #include "framework/common/util.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "graph/utils/type_utils.h" | |||
| @@ -144,6 +144,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in | |||
| auto protected_size = total_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? total_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| int64_t cIdx = c0_idx + c1_idx * c0; | |||
| int64_t srcIdx = n_idx * chw + cIdx * hw + h_idx * w + w_idx; | |||
| auto src_offset = srcIdx * size; | |||
| @@ -149,6 +149,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in | |||
| auto protected_size = total_size - dst_offset < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? total_size - dst_offset | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| int64_t c_idx = c0_idx + c1_idx * c0; | |||
| int64_t src_idx = n_idx * hwc + h_idx * wc + w_idx * c + c_idx; | |||
| auto src_offset = src_idx * size; | |||
| @@ -171,6 +171,7 @@ Status Transpose(const uint8_t *src, const std::vector<int64_t> &src_shape, Data | |||
| auto protected_size = dst_size - dst_offset_bytes < static_cast<int64_t>(SECUREC_MEM_MAX_LEN) | |||
| ? dst_size - dst_offset_bytes | |||
| : static_cast<int64_t>(SECUREC_MEM_MAX_LEN); | |||
| GE_CHECK_GE(protected_size, 0); | |||
| auto ret = memcpy_s(dst.get() + dst_offset_bytes, static_cast<size_t>(protected_size), src + src_offset, | |||
| static_cast<size_t>(data_size)); | |||
| if (ret != EOK) { | |||
| @@ -33,7 +33,6 @@ Status TransposeWithShapeCheck(const uint8_t *src, const std::vector<int64_t> &s | |||
| Status GetPermByForamt(Format src_format, Format dst_format, std::vector<int64_t> &perm); | |||
| class FormatTransferTranspose : public FormatTransfer { | |||
| public: | |||
| Status TransFormat(const TransArgs &args, TransResult &result) override; | |||
| @@ -17,6 +17,7 @@ | |||
| #include "common/formats/formats.h" | |||
| #include <securec.h> | |||
| #include <cmath> | |||
| #include <cstring> | |||
| #include <functional> | |||
| @@ -32,7 +33,7 @@ | |||
| namespace ge { | |||
| namespace formats { | |||
| GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransFormat(const TransArgs &args, TransResult &result) { | |||
| Status TransFormat(const TransArgs &args, TransResult &result) { | |||
| auto transfer = BuildFormatTransfer(args); | |||
| if (transfer == nullptr) { | |||
| std::string error = "Failed to trans data from format " + | |||
| @@ -56,11 +57,8 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransFormat(const TransArg | |||
| return transfer->TransFormat(args, result); | |||
| } | |||
| GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransShape(Format src_format, | |||
| const std::vector<int64_t> &src_shape, | |||
| DataType data_type, | |||
| Format dst_format, | |||
| std::vector<int64_t> &dst_shape) { | |||
| Status TransShape(Format src_format, const std::vector<int64_t> &src_shape, DataType data_type, Format dst_format, | |||
| std::vector<int64_t> &dst_shape) { | |||
| formats::TransArgs args; | |||
| args.src_format = src_format; | |||
| args.dst_format = dst_format; | |||
| @@ -76,7 +74,7 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransShape(Format src_form | |||
| return transfer->TransShape(src_format, src_shape, data_type, dst_format, dst_shape); | |||
| } | |||
| GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransDataType(const CastArgs &args, TransResult &result) { | |||
| Status TransDataType(const CastArgs &args, TransResult &result) { | |||
| auto transfer = BuildDataTypeTransfer(args); | |||
| if (transfer == nullptr) { | |||
| std::string error = "Failed to trans data from datatype " + | |||
| @@ -95,11 +93,11 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransDataType(const CastAr | |||
| return transfer->TransDataType(args, result); | |||
| } | |||
| GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY bool IsTransFormatSupport(const TransArgs &args) { | |||
| bool IsTransFormatSupport(const TransArgs &args) { | |||
| return FormatTransferExists(args); | |||
| } | |||
| GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY bool IsTransDataTypeSupport(const CastArgs &args) { | |||
| bool IsTransDataTypeSupport(const CastArgs &args) { | |||
| return DataTypeTransferExists(args); | |||
| } | |||
| } // namespace formats | |||
| @@ -41,14 +41,32 @@ int64_t GetCubeSizeByDataType(DataType data_type) { | |||
| } | |||
| } | |||
| GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY std::string ShapeToString(const GeShape &shape) { | |||
| std::string ShapeToString(const GeShape &shape) { | |||
| return ShapeToString(shape.GetDims()); | |||
| } | |||
| GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY std::string ShapeToString(const std::vector<int64_t> &shape) { | |||
| std::string ShapeToString(const std::vector<int64_t> &shape) { | |||
| return JoinToString(shape); | |||
| } | |||
| std::string RangeToString(const std::vector<std::pair<int64_t, int64_t>> &ranges) { | |||
| bool first = true; | |||
| std::stringstream ss; | |||
| ss << "["; | |||
| for (const auto &range : ranges) { | |||
| if (first) { | |||
| first = false; | |||
| } else { | |||
| ss << ","; | |||
| } | |||
| ss << "{"; | |||
| ss << range.first << "," << range.second; | |||
| ss << "}"; | |||
| } | |||
| ss << "]"; | |||
| return ss.str(); | |||
| } | |||
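RangeToString is a new helper for logging shape ranges; its output format follows directly from the implementation above. A short usage sketch, assuming it sits in the same ge::formats namespace as ShapeToString:

```cpp
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "common/formats/utils/formats_trans_utils.h"

// Illustrative only; the expected output is derived from the implementation above.
std::vector<std::pair<int64_t, int64_t>> ranges = {{1, 16}, {3, 3}, {-1, 224}};
std::string text = ge::formats::RangeToString(ranges);
// text == "[{1,16},{3,3},{-1,224}]"
```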
| int64_t GetItemNumByShape(const std::vector<int64_t> &shape) { | |||
| int64_t num = 1; | |||
| for (auto dim : shape) { | |||
| @@ -54,6 +54,8 @@ std::string ShapeToString(const GeShape &shape); | |||
| std::string ShapeToString(const std::vector<int64_t> &shape); | |||
| std::string RangeToString(const std::vector<std::pair<int64_t, int64_t>> &ranges); | |||
| int64_t GetItemNumByShape(const std::vector<int64_t> &shape); | |||
| bool CheckShapeValid(const std::vector<int64_t> &shape, const int64_t expect_dims); | |||
| @@ -1180,20 +1180,40 @@ fp16_t &fp16_t::operator=(const double &d_val) { | |||
| } | |||
| // convert | |||
| fp16_t::operator float() const { return Fp16ToFloat(val); } | |||
| fp16_t::operator double() const { return Fp16ToDouble(val); } | |||
| fp16_t::operator int8_t() const { return Fp16ToInt8(val); } | |||
| fp16_t::operator uint8_t() const { return Fp16ToUInt8(val); } | |||
| fp16_t::operator int16_t() const { return Fp16ToInt16(val); } | |||
| fp16_t::operator uint16_t() const { return Fp16ToUInt16(val); } | |||
| fp16_t::operator int32_t() const { return Fp16ToInt32(val); } | |||
| fp16_t::operator uint32_t() const { return Fp16ToUInt32(val); } | |||
| fp16_t::operator float() const { | |||
| return Fp16ToFloat(val); | |||
| } | |||
| fp16_t::operator double() const { | |||
| return Fp16ToDouble(val); | |||
| } | |||
| fp16_t::operator int8_t() const { | |||
| return Fp16ToInt8(val); | |||
| } | |||
| fp16_t::operator uint8_t() const { | |||
| return Fp16ToUInt8(val); | |||
| } | |||
| fp16_t::operator int16_t() const { | |||
| return Fp16ToInt16(val); | |||
| } | |||
| fp16_t::operator uint16_t() const { | |||
| return Fp16ToUInt16(val); | |||
| } | |||
| fp16_t::operator int32_t() const { | |||
| return Fp16ToInt32(val); | |||
| } | |||
| fp16_t::operator uint32_t() const { | |||
| return Fp16ToUInt32(val); | |||
| } | |||
| // Cannot be used; defined only to resolve a compile error | |||
| fp16_t::operator int64_t() const { return 0; } | |||
| fp16_t::operator int64_t() const { | |||
| return 0; | |||
| } | |||
| // Cannot be used; defined only to resolve a compile error | |||
| fp16_t::operator uint64_t() const { return 0; } | |||
| fp16_t::operator uint64_t() const { | |||
| return 0; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int fp16_t::IsInf() { | |||
| int fp16_t::IsInf() { | |||
| if ((val & kFp16AbsMax) == kFp16ExpMask) { | |||
| if (val & kFp16SignMask) { | |||
| return -1; | |||
| @@ -1205,12 +1225,28 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int fp16_t::IsInf() { | |||
| } | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY float fp16_t::ToFloat() const { return Fp16ToFloat(val); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY double fp16_t::ToDouble() const { return Fp16ToDouble(val); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int8_t fp16_t::ToInt8() const { return Fp16ToInt8(val); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint8_t fp16_t::ToUInt8() const { return Fp16ToUInt8(val); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int16_t fp16_t::ToInt16() const { return Fp16ToInt16(val); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint16_t fp16_t::ToUInt16() const { return Fp16ToUInt16(val); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int32_t fp16_t::ToInt32() const { return Fp16ToInt32(val); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint32_t fp16_t::ToUInt32() const { return Fp16ToUInt32(val); } | |||
| float fp16_t::ToFloat() const { | |||
| return Fp16ToFloat(val); | |||
| } | |||
| double fp16_t::ToDouble() const { | |||
| return Fp16ToDouble(val); | |||
| } | |||
| int8_t fp16_t::ToInt8() const { | |||
| return Fp16ToInt8(val); | |||
| } | |||
| uint8_t fp16_t::ToUInt8() const { | |||
| return Fp16ToUInt8(val); | |||
| } | |||
| int16_t fp16_t::ToInt16() const { | |||
| return Fp16ToInt16(val); | |||
| } | |||
| uint16_t fp16_t::ToUInt16() const { | |||
| return Fp16ToUInt16(val); | |||
| } | |||
| int32_t fp16_t::ToInt32() const { | |||
| return Fp16ToInt32(val); | |||
| } | |||
| uint32_t fp16_t::ToUInt32() const { | |||
| return Fp16ToUInt32(val); | |||
| } | |||
| } // namespace ge | |||
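The fp16_t conversion operators and To* helpers above are only reformatted (one statement per line, visibility macros dropped); behaviour is unchanged. A small usage sketch relying on members visible in this diff, assuming the default constructor that fp16_t declares elsewhere in fp16_t.h:

```cpp
#include <cstdint>
// fp16_t comes from fp16_t.h in this same directory (include path assumed).

void Fp16ConversionExample() {
  ge::fp16_t h;
  h = 1.5;                                  // operator=(const double &), defined just above this hunk
  float f = h.ToFloat();                    // 1.5f (1.5 is exactly representable in fp16)
  int32_t i = static_cast<int32_t>(h);      // via operator int32_t() -> Fp16ToInt32(val)
  uint64_t u = static_cast<uint64_t>(h);    // always 0: defined only to satisfy the compiler
  (void)f; (void)i; (void)u;
}
```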
| @@ -20,7 +20,7 @@ | |||
| #include <map> | |||
| #include <vector> | |||
| #include "graph/types.h" | |||
| #include "external/graph/types.h" | |||
| namespace ge { | |||
| static const int32_t kGeSizeFloat = sizeof(float); | |||
| @@ -42,7 +42,7 @@ static std::map<ge::DataType, int32_t> CONST_OPDATA_TYPE_SIZE_MAP = { | |||
| {ge::DT_UINT8, kGeSizeUint8}, {ge::DT_UINT16, kGeSizeUint16}, {ge::DT_UINT32, kGeSizeUint32}, | |||
| {ge::DT_UINT64, kGeSizeUint64}, {ge::DT_DOUBLE, kGeSizeDouble}, {ge::DT_BOOL, kGeSizeBool}}; | |||
| class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY DataTypeUtil { | |||
| class DataTypeUtil { | |||
| public: | |||
| static bool DataTypeTranslatable(const ge::DataType &src_out_data_type, const ge::DataType &dst_in_data_type); | |||
| static const std::vector<ge::DataType> &GetTranslatableDataTypesBySrc(const ge::DataType &src_out_data_type); | |||
| @@ -26,8 +26,8 @@ | |||
| #include <typeinfo> | |||
| #include <vector> | |||
| #include "common/ge_inner_error_codes.h" | |||
| #include "engine/dnnengine.h" | |||
| #include "framework/common/ge_inner_error_codes.h" | |||
| #include "framework/engine/dnnengine.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "mmpa/mmpa_api.h" | |||
| @@ -42,7 +42,7 @@ const int kBaseInt = 10; | |||
| std::map<string, string> TBEPluginManager::options_ = {}; | |||
| // Get Singleton Instance | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY TBEPluginManager &TBEPluginManager::Instance() { | |||
| TBEPluginManager &TBEPluginManager::Instance() { | |||
| static TBEPluginManager instance_ptr_; | |||
| return instance_ptr_; | |||
| } | |||
| @@ -61,7 +61,7 @@ Status TBEPluginManager::ClearHandles_() { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status TBEPluginManager::Finalize() { | |||
| Status TBEPluginManager::Finalize() { | |||
| Status ret = ClearHandles_(); | |||
| return ret; | |||
| } | |||
| @@ -104,7 +104,15 @@ void TBEPluginManager::ProcessSoFullName(vector<string> &file_list, string &caff | |||
| } | |||
| } | |||
| void TBEPluginManager::FindParserSo(const string &path, vector<string> &file_list, string &caffe_parser_path) { | |||
| void TBEPluginManager::FindParserSo(const string &path, vector<string> &file_list, | |||
| string &caffe_parser_path, uint32_t recursive_depth) { | |||
| static const uint32_t max_recursive_depth = 20; // For recursive depth protection | |||
| if (recursive_depth >= max_recursive_depth) { | |||
| GELOGW("Recursive depth is become %u, Please check input!", recursive_depth); | |||
| return; | |||
| } | |||
| // Path, change to absolute path | |||
| string real_path = RealPath(path.c_str()); | |||
| // Plugin path does not exist | |||
| @@ -138,7 +146,7 @@ void TBEPluginManager::FindParserSo(const string &path, vector<string> &file_lis | |||
| ProcessSoFullName(file_list, caffe_parser_path, full_name, caffe_parser_so_suff, aicpu_so_suff, | |||
| aicpu_host_so_suff); | |||
| } else { | |||
| FindParserSo(full_name, file_list, caffe_parser_path); | |||
| FindParserSo(full_name, file_list, caffe_parser_path, recursive_depth + 1); | |||
| } | |||
| } | |||
| mmScandirFree(entries, ret); | |||
| @@ -199,7 +207,6 @@ void TBEPluginManager::LoadCustomOpLib() { | |||
| } | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY | |||
| void TBEPluginManager::LoadPluginSo(const std::map<string, string> &options) { | |||
| vector<string> file_list; | |||
| string caffe_parser_path; | |||
| @@ -238,7 +245,6 @@ void TBEPluginManager::LoadPluginSo(const std::map<string, string> &options) { | |||
| } | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY | |||
| void TBEPluginManager::InitPreparation(const std::map<string, string> &options) { | |||
| options_.insert(options.begin(), options.end()); | |||
| // Load TBE plugin | |||
| @@ -57,7 +57,8 @@ class TBEPluginManager { | |||
| static void ProcessSoFullName(vector<string> &file_list, string &caffe_parser_path, string &full_name, | |||
| const string &caffe_parser_so_suff, const string &aicpu_so_suff, | |||
| const string &aicpu_host_so_suff); | |||
| static void FindParserSo(const string &path, vector<string> &file_list, string &caffe_parser_path); | |||
| static void FindParserSo(const string &path, vector<string> &file_list, string &caffe_parser_path, | |||
| uint32_t recursive_depth = 0); | |||
| static void GetPluginSoFileList(const string &path, vector<string> &file_list, string &caffe_parser_path); | |||
| static void GetCustomOpPath(std::string &customop_path); | |||
| void LoadCustomOpLib(); | |||
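The new recursive_depth parameter (default 0) caps FindParserSo at 20 nested directory levels, so a cyclic or pathologically deep plugin tree can no longer recurse without bound, while existing single-argument call sites keep compiling. A generic sketch of the guard shape, with the directory listing elided (hypothetical helper, not GE code):

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical helper showing the bounded-recursion shape now used by FindParserSo.
static void CollectFiles(const std::string &path, std::vector<std::string> &files, uint32_t depth = 0) {
  static const uint32_t kMaxDepth = 20U;   // mirrors max_recursive_depth in FindParserSo
  if (depth >= kMaxDepth) {
    return;                                // stop instead of recursing without bound
  }
  // ... list `path`; push regular files into `files`; for each sub-directory `sub`:
  //     CollectFiles(sub, files, depth + 1);
  (void)path;
  (void)files;
}
```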
| @@ -15,12 +15,10 @@ | |||
| */ | |||
| #include "framework/common/ge_format_util.h" | |||
| #include "formats/formats.h" | |||
| #include "common/formats/formats.h" | |||
| namespace ge { | |||
| GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status GeFormatUtil::TransShape(const TensorDesc &src_desc, | |||
| Format dst_format, | |||
| std::vector<int64_t> &dst_shape) { | |||
| Status GeFormatUtil::TransShape(const TensorDesc &src_desc, Format dst_format, std::vector<int64_t> &dst_shape) { | |||
| return formats::TransShape(src_desc.GetFormat(), src_desc.GetShape().GetDims(), src_desc.GetDataType(), dst_format, | |||
| dst_shape); | |||
| } | |||
| @@ -1,123 +0,0 @@ | |||
| /** | |||
| * Copyright 2019-2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef GE_COMMON_HELPER_MODEL_CACHE_HELPER_H_ | |||
| #define GE_COMMON_HELPER_MODEL_CACHE_HELPER_H_ | |||
| #include <nlohmann/json.hpp> | |||
| #include <set> | |||
| #include <string> | |||
| #include "ge/ge_api_error_codes.h" | |||
| #include "graph/compute_graph.h" | |||
| #include "graph/manager/graph_var_manager.h" | |||
| #include "model/ge_model.h" | |||
| namespace ge { | |||
| using Json = nlohmann::json; | |||
| struct CacheInfo { | |||
| size_t node_num; | |||
| size_t edge_num; | |||
| size_t graph_hash; | |||
| map<std::string, size_t> nodes_hash; | |||
| CacheInfo() : node_num(0), edge_num(0), graph_hash(0) {} | |||
| }; | |||
| class ModelCacheHelper { | |||
| public: | |||
| ModelCacheHelper(uint64_t session_id, uint32_t graph_id, ComputeGraphPtr &compute_graph); | |||
| ~ModelCacheHelper(); | |||
| Status SaveCacheInfoToCache () const; | |||
| Status SaveVarManagerToCache(bool before_build) const; | |||
| Status SaveOmModelToCache(const GeModelPtr &ge_model) const; | |||
| bool IsModelCacheHit() const; | |||
| Status RecoverVarManagerFromCache() const; | |||
| Status LoadOmModelFromCache(GeModelPtr &ge_model) const; | |||
| Status RefreshComputeGraph(const ComputeGraphPtr &compute_graph); | |||
| Status ClearCache(uint32_t graph_id) const; | |||
| private: | |||
| Status GetComputeGraphHash(size_t &hash) const; | |||
| Status GetNodesHash(map<std::string, size_t> &hash_map) const; | |||
| Status GetCacheInfo(CacheInfo &cache_info) const; | |||
| Status RecoverMemResource(const Json &json) const; | |||
| Status RecoverAllocatedGraphId(const Json &json) const; | |||
| Status RecoverChangedGraphId(const Json &json) const; | |||
| Status RecoverVarAddrAndTensorDesc(const Json &json) const; | |||
| Status RecoverBroadcastInfo(const Json &json) const; | |||
| Status RecoverTransRoads(const Json &json) const; | |||
| static Status GetNodesNeedRecompile(ComputeGraphPtr &graph, vector<NodePtr> &nodes); | |||
| static Status RecompileNodes(GeModelPtr &ge_model); | |||
| bool IsNodeHashSameAsCache(const map<std::string, size_t> &hash_map) const; | |||
| bool IsMemResourceSameAsCache(Json &json) const; | |||
| bool IsChangedGraphIdSameAsCache(Json &json) const; | |||
| bool IsAllocatedGraphIdSameAsCache(Json &json) const; | |||
| bool IsCurVarTensorDescSameAsCache(Json &json) const; | |||
| bool IsVarAddrMgrMapSameAsCache(Json &json) const; | |||
| bool IsBroadcastInfoSameAsCache(Json &json) const; | |||
| bool IsTransRoadsSameAsCache(Json &json) const; | |||
| bool IsVarManagerSameAsCache(Json &json) const; | |||
| bool IsVarManagerParamSameAsCache(Json &json) const; | |||
| Status SaveJsonToFile(const string &file_name, const Json &json) const; | |||
| Status LoadJsonFromFile(const string &file_name, Json &json) const; | |||
| Status GetNodesHashMapJson(Json &json) const; | |||
| Status GetMemResourceMap(Json &json) const; | |||
| Status GetVarAddrMgrMapJson(Json &json) const; | |||
| Status GetCurVarTensorDescMapJson(Json &json) const; | |||
| Status GetTransRoadsJson(Json &json) const; | |||
| Status GetChangedGraphIdJson(Json &json) const; | |||
| Status GetAllocatedGraphIdJson(Json &json) const; | |||
| Status GetBroadcastInfoJson(Json &json) const; | |||
| Status GetVarResourceJson(Json &json) const; | |||
| Status GetVarManagerJson(Json &json) const; | |||
| static Status TensorDescToJson(const GeTensorDesc &ge_tensor_desc, Json &json); | |||
| static Status JsonToTensorDesc(const Json &json, GeTensorDesc &ge_tensor_desc); | |||
| static Status ParseMemResourceFromJson(const Json &json, map<rtMemType_t, int64_t> &mem_resource); | |||
| static Status ParseVarAddrMgrMapFromJson(const Json &json, | |||
| std::vector<std::pair<std::string, VarAddrMgr>> &var_addr_mgr_vector, | |||
| std::set<uint64_t> &var_offset_set); | |||
| static Status ParseCurVarTensorDescMapFromJson( | |||
| const Json &json, std::unordered_map<std::string, ge::GeTensorDesc> &cur_var_tensor_desc_map); | |||
| static Status ParseTransRoadsFromJson(const Json &json, | |||
| std::unordered_map<std::string, std::vector<TransNodeInfo>> &trans_roads); | |||
| static Status ParseChangedGraphIdFromJson(const Json &json, | |||
| std::map<std::string, uint32_t> &changed_graph_id); | |||
| static Status ParseAllocatedGraphIdFromJson(const Json &json, | |||
| std::map<std::string, uint32_t> &allocated_graph_id); | |||
| static Status ParseBroadcastInfoFromJson(const Json &json, | |||
| std::unordered_map<std::string, VarBroadCastInfo> &var_broadcast_info); | |||
| static Status GetVarNameFromVarKey(const string &var_key, const GeTensorDesc &tensor_desc, string &var_name); | |||
| uint64_t session_id_; | |||
| uint32_t graph_id_; | |||
| string cache_path_; | |||
| ComputeGraphPtr compute_graph_; | |||
| std::set<string> var_names_; | |||
| bool is_cache_path_valid_for_output; | |||
| static map<uint32_t, uint32_t> graph_id_run_times_; | |||
| }; | |||
| using ModelCacheHelperPtr = std::shared_ptr<ModelCacheHelper>; | |||
| } // namespace ge | |||
| #endif // GE_COMMON_HELPER_MODEL_CACHE_HELPER_H_ | |||
| @@ -33,7 +33,7 @@ const uint32_t kStatiOmFileModelNum = 1; | |||
| namespace ge { | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelHelper::~ModelHelper() { (void)ReleaseLocalModelData(); } | |||
| ModelHelper::~ModelHelper() { (void)ReleaseLocalModelData(); } | |||
| Status ModelHelper::SaveModelPartition(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, ModelPartitionType type, | |||
| const uint8_t *data, size_t size, size_t model_index) { | |||
| @@ -108,8 +108,8 @@ Status ModelHelper::SaveSizeToModelDef(const GeModelPtr &ge_model) { | |||
| return SUCCESS; | |||
| } | |||
| Status ModelHelper::SaveModelDef(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, | |||
| const GeModelPtr &ge_model, ge::Buffer &model_buffer, size_t model_index) { | |||
| Status ModelHelper::SaveModelDef(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, const GeModelPtr &ge_model, | |||
| ge::Buffer &model_buffer, size_t model_index) { | |||
| ModelPtr model_tmp = ge::MakeShared<ge::Model>(ge_model->GetName(), ge_model->GetPlatformVersion()); | |||
| if (model_tmp == nullptr) { | |||
| GELOGE(FAILED, "[Creat][Model]Failed, Model %s Ptr", ge_model->GetName().c_str()); | |||
| @@ -143,8 +143,8 @@ Status ModelHelper::SaveModelDef(std::shared_ptr<OmFileSaveHelper> &om_file_save | |||
| return SUCCESS; | |||
| } | |||
| Status ModelHelper::SaveModelWeights(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, | |||
| const GeModelPtr &ge_model, size_t model_index) { | |||
| Status ModelHelper::SaveModelWeights(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, const GeModelPtr &ge_model, | |||
| size_t model_index) { | |||
| auto ge_model_weight = ge_model->GetWeight(); | |||
| GELOGD("WEIGHTS_DATA size is %zu, %p", ge_model_weight.GetSize(), ge_model_weight.GetData()); | |||
| // weight is not necessary | |||
| @@ -187,8 +187,8 @@ Status ModelHelper::SaveModelCustAICPU(std::shared_ptr<OmFileSaveHelper> &om_fil | |||
| return SUCCESS; | |||
| } | |||
| Status ModelHelper::SaveModelTaskDef(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, | |||
| const GeModelPtr &ge_model, ge::Buffer &task_buffer, size_t model_index) { | |||
| Status ModelHelper::SaveModelTaskDef(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, const GeModelPtr &ge_model, | |||
| ge::Buffer &task_buffer, size_t model_index) { | |||
| std::shared_ptr<ModelTaskDef> model_task_def = ge_model->GetModelTaskDefPtr(); | |||
| if (model_task_def == nullptr) { | |||
| GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[Creat][ModelTaskDef]Failed, it is nullptr, " | |||
| @@ -231,8 +231,8 @@ Status ModelHelper::SaveModelTaskDef(std::shared_ptr<OmFileSaveHelper> &om_file_ | |||
| return SUCCESS; | |||
| } | |||
| Status ModelHelper::SaveModelHeader(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, | |||
| const GeModelPtr &ge_model, size_t model_num) { | |||
| Status ModelHelper::SaveModelHeader(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, const GeModelPtr &ge_model, | |||
| size_t model_num) { | |||
| // Save target/version to model_header | |||
| ModelFileHeader &model_header = om_file_save_helper->GetModelFileHeader(); | |||
| model_header.platform_type = ge_model->GetPlatformType(); | |||
| @@ -246,8 +246,10 @@ Status ModelHelper::SaveModelHeader(std::shared_ptr<OmFileSaveHelper> &om_file_s | |||
| if (err != EOK) { | |||
| GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, | |||
| "[Save][Model]Failed while allocating memory for platform_version %s, model %s, " | |||
| "errno %d", platform_version.c_str(), ge_model->GetName().c_str(), err); | |||
| REPORT_CALL_ERROR("E19999", "ModelHelper save model %s failed while " | |||
| "errno %d", | |||
| platform_version.c_str(), ge_model->GetName().c_str(), err); | |||
| REPORT_CALL_ERROR("E19999", | |||
| "ModelHelper save model %s failed while " | |||
| "allocating memory for platform_version %s, errno %d", | |||
| ge_model->GetName().c_str(), platform_version.c_str(), err); | |||
| return ACL_ERROR_GE_MEMORY_ALLOCATION; | |||
| @@ -271,9 +273,9 @@ Status ModelHelper::SaveModelHeader(std::shared_ptr<OmFileSaveHelper> &om_file_s | |||
| return SUCCESS; | |||
| } | |||
| Status ModelHelper::SaveAllModelPartiton(std::shared_ptr<OmFileSaveHelper>& om_file_save_helper, | |||
| const GeModelPtr &ge_model, ge::Buffer &model_buffer, | |||
| ge::Buffer &task_buffer, size_t model_index) { | |||
| Status ModelHelper::SaveAllModelPartiton(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, | |||
| const GeModelPtr &ge_model, ge::Buffer &model_buffer, ge::Buffer &task_buffer, | |||
| size_t model_index) { | |||
| if (SaveModelDef(om_file_save_helper, ge_model, model_buffer, model_index) != SUCCESS) { | |||
| GELOGE(FAILED, "[Save][ModelDef]Failed, model %s, model index %zu", | |||
| ge_model->GetName().c_str(), model_index); | |||
| @@ -316,10 +318,8 @@ Status ModelHelper::SaveAllModelPartiton(std::shared_ptr<OmFileSaveHelper>& om_f | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmModel(const GeModelPtr &ge_model, | |||
| const SaveParam &save_param, | |||
| const std::string &output_file, | |||
| ModelBufferData& model) { | |||
| Status ModelHelper::SaveToOmModel(const GeModelPtr &ge_model, const SaveParam &save_param, | |||
| const std::string &output_file, ModelBufferData &model) { | |||
| if (output_file.empty()) { | |||
| GELOGE(FAILED, "[Save][Model]GraphBuilder SaveModel received invalid file name prefix, " | |||
| "model %s", ge_model->GetName().c_str()); | |||
| @@ -367,13 +367,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmMod | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmRootModel( | |||
| const GeRootModelPtr &ge_root_model, | |||
| const SaveParam &save_param, | |||
| const std::string &output_file, | |||
| ModelBufferData& model, | |||
| bool is_unknown_shape) { | |||
| Status ModelHelper::SaveToOmRootModel(const GeRootModelPtr &ge_root_model, const SaveParam &save_param, | |||
| const std::string &output_file, ModelBufferData &model, bool is_unknown_shape) { | |||
| GE_CHECK_NOTNULL(ge_root_model); | |||
| GE_IF_BOOL_EXEC(ge_root_model == nullptr, | |||
| GELOGE(FAILED, "[Check][GERootModel]Ge_root_model is nullptr"); | |||
| @@ -466,8 +461,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmRoo | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status | |||
| ModelHelper::SaveOriginalGraphToOmModel(const ge::Graph &graph, const std::string &output_file) { | |||
| Status ModelHelper::SaveOriginalGraphToOmModel(const ge::Graph &graph, const std::string &output_file) { | |||
| if (output_file.empty()) { | |||
| GELOGE(FAILED, "[Save][Model]Received invalid file name prefix, output_file %s", output_file.c_str()); | |||
| REPORT_INNER_ERROR("E19999", "Save model received invalid file name prefix, output_file %s", output_file.c_str()); | |||
| @@ -545,7 +539,7 @@ ModelHelper::SaveOriginalGraphToOmModel(const ge::Graph &graph, const std::strin | |||
| return (ret == SUCCESS ? SUCCESS : FAILED); | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadModel(const ge::ModelData &model_data) { | |||
| Status ModelHelper::LoadModel(const ge::ModelData &model_data) { | |||
| if (model_data.model_data == nullptr || model_data.model_len == 0) { | |||
| GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, | |||
| "[Load][Model]Model_data is nullptr or model_data_size is 0"); | |||
| @@ -597,7 +591,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadModel(c | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadRootModel(const ge::ModelData &model_data) { | |||
| Status ModelHelper::LoadRootModel(const ge::ModelData &model_data) { | |||
| if (model_data.model_data == nullptr || model_data.model_len == 0) { | |||
| GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "[Load][RootModel] " | |||
| "Model_data is nullptr or model data is empty."); | |||
| @@ -783,7 +777,6 @@ Status ModelHelper::LoadModelData(OmFileLoadHelper &om_load_helper, GeModelPtr & | |||
| return SUCCESS; | |||
| } | |||
| Status ModelHelper::LoadWeights(OmFileLoadHelper &om_load_helper) { | |||
| ModelPartition partition; | |||
| if (om_load_helper.GetModelPartition(ModelPartitionType::WEIGHTS_DATA, partition) != SUCCESS) { | |||
| @@ -814,7 +807,7 @@ Status ModelHelper::LoadWeights(OmFileLoadHelper &om_load_helper, GeModelPtr &cu | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadTask(OmFileLoadHelper &om_load_helper) { | |||
| Status ModelHelper::LoadTask(OmFileLoadHelper &om_load_helper) { | |||
| ModelPartition task_partition; | |||
| if (om_load_helper.GetModelPartition(ModelPartitionType::TASK_INFO, task_partition) != SUCCESS) { | |||
| GELOGE(FAILED, "[Get][ModelTaskPartition]Failed, task_partition size:%u", task_partition.size); | |||
| @@ -838,9 +831,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadTask(Om | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadTask(OmFileLoadHelper &om_load_helper, | |||
| GeModelPtr &cur_model, | |||
| size_t mode_index) { | |||
| Status ModelHelper::LoadTask(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index) { | |||
| ModelPartition task_partition; | |||
| if (om_load_helper.GetModelPartition(ModelPartitionType::TASK_INFO, task_partition, mode_index) != SUCCESS) { | |||
| GELOGE(FAILED, "Get task model partition failed."); | |||
| @@ -915,8 +906,8 @@ Status ModelHelper::LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper) { | |||
| return SUCCESS; | |||
| } | |||
| Status ModelHelper::LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper, | |||
| GeModelPtr &cur_model, size_t mode_index) { | |||
| Status ModelHelper::LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, | |||
| size_t mode_index) { | |||
| // Load cust aicpu kernels | |||
| ModelPartition partition_kernel_def; | |||
| CustAICPUKernelStore kernel_store; | |||
| @@ -933,7 +924,7 @@ Status ModelHelper::LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper, | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeModelPtr ModelHelper::GetGeModel() { | |||
| GeModelPtr ModelHelper::GetGeModel() { | |||
| if (model_ != nullptr) { | |||
| return model_; | |||
| } | |||
| @@ -946,7 +937,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeModelPtr ModelHelper::GetGeMo | |||
| return out_model; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeRootModelPtr ModelHelper::GetGeRootModel() { | |||
| GeRootModelPtr ModelHelper::GetGeRootModel() { | |||
| if (root_model_ != nullptr) { | |||
| return root_model_; | |||
| } | |||
| @@ -959,7 +950,6 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeRootModelPtr ModelHelper::Get | |||
| return out_model; | |||
| } | |||
| Status ModelHelper::ReleaseLocalModelData() noexcept { | |||
| Status result = SUCCESS; | |||
| if (model_addr_tmp_ != nullptr) { | |||
| @@ -976,8 +966,7 @@ Status ModelHelper::ReleaseLocalModelData() noexcept { | |||
| return result; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::GetBaseNameFromFileName( | |||
| const string &file_name, string &base_name) { | |||
| Status ModelHelper::GetBaseNameFromFileName(const string &file_name, string &base_name) { | |||
| GELOGD("Get base_name from file, file_name:%s", file_name.c_str()); | |||
| GE_CHK_BOOL_EXEC_WARN(!file_name.empty(), return FAILED, "File path may not be valid, check params --output"); | |||
| size_t start_position = 0; | |||
| @@ -992,8 +981,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::GetBaseName | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::GetModelNameFromMergedGraphName( | |||
| const string &graph_name, string &model_name) { | |||
| Status ModelHelper::GetModelNameFromMergedGraphName(const string &graph_name, string &model_name) { | |||
| GELOGD("Get model_name from graph_name, graph_name:%s", graph_name.c_str()); | |||
| // this can only be used after the graph is merged (graph name will be appended with "_x", where x is the index); | |||
| GE_CHK_BOOL_EXEC_WARN(!graph_name.empty(), return FAILED, "File path may not be valid, check params --output"); | |||
| @@ -1035,8 +1023,7 @@ Status ModelTool::GetModelInfoFromOm(const char *model_file, ge::proto::ModelDef | |||
| ErrorManager::GetInstance().ATCReportErrMessage("E10003", | |||
| {"parameter", "value", "reason"}, {"om", model_file, "invalid om file, can't be parsed"}); | |||
| GELOGE(ACL_ERROR_GE_PARAM_INVALID, | |||
| "[Parse][ModelContent]Failed because of invalid om file %s, please check om param", | |||
| model_file); | |||
| "[Parse][ModelContent]Failed because of invalid om file %s, please check om param", model_file); | |||
| return ret; | |||
| } | |||
| @@ -18,10 +18,11 @@ | |||
| #include <string> | |||
| #include <vector> | |||
| #include "common/math/math_util.h" | |||
| #include "common/auth/file_saver.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "common/math/math_util.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "framework/common/ge_inner_error_codes.h" | |||
| #include "framework/common/util.h" | |||
| @@ -32,7 +33,7 @@ const int32_t kOptionalNum = 2; | |||
| } | |||
| namespace ge { | |||
| // For Load | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(const ge::ModelData &model) { | |||
| Status OmFileLoadHelper::Init(const ge::ModelData &model) { | |||
| if (CheckModelValid(model) != SUCCESS) { | |||
| return FAILED; | |||
| } | |||
| @@ -42,8 +43,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(c | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(uint8_t *model_data, | |||
| const uint32_t model_data_size) { | |||
| Status OmFileLoadHelper::Init(uint8_t *model_data, const uint32_t model_data_size) { | |||
| Status status = LoadModelPartitionTable(model_data, model_data_size); | |||
| if (status != SUCCESS) { | |||
| return status; | |||
| @@ -52,9 +52,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(u | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(uint8_t *model_data, | |||
| uint32_t model_data_size, | |||
| uint32_t model_num) { | |||
| Status OmFileLoadHelper::Init(uint8_t *model_data, uint32_t model_data_size, uint32_t model_num) { | |||
| Status status = LoadModelPartitionTable(model_data, model_data_size, model_num); | |||
| if (status != SUCCESS) { | |||
| return status; | |||
| @@ -64,8 +62,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(u | |||
| } | |||
| // Use both | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::GetModelPartition(ModelPartitionType type, | |||
| ModelPartition &partition) { | |||
| Status OmFileLoadHelper::GetModelPartition(ModelPartitionType type, ModelPartition &partition) { | |||
| if (!is_inited_) { | |||
| GELOGE(PARAM_INVALID, "OmFileLoadHelper has not been initialized!"); | |||
| return PARAM_INVALID; | |||
| @@ -90,9 +87,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::GetMod | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::GetModelPartition(ModelPartitionType type, | |||
| ModelPartition &partition, | |||
| size_t model_index) { | |||
| Status OmFileLoadHelper::GetModelPartition(ModelPartitionType type, ModelPartition &partition, size_t model_index) { | |||
| if (!is_inited_) { | |||
| GELOGE(PARAM_INVALID, "OmFileLoadHelper has not been initialized!"); | |||
| return PARAM_INVALID; | |||
| @@ -248,12 +243,11 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, uint32_t m | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::vector<ModelPartition> | |||
| &OmFileSaveHelper::GetModelPartitions() const { | |||
| const std::vector<ModelPartition> &OmFileSaveHelper::GetModelPartitions() const { | |||
| return context_.partition_datas_; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelPartitionTable *OmFileSaveHelper::GetPartitionTable() { | |||
| ModelPartitionTable *OmFileSaveHelper::GetPartitionTable() { | |||
| auto partition_size = static_cast<uint32_t>(context_.partition_datas_.size()); | |||
| // Build ModelPartitionTable, flex array | |||
| context_.partition_table_.clear(); | |||
| @@ -272,8 +266,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelPartitionTable *OmFileSave | |||
| return partition_table; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelPartitionTable *OmFileSaveHelper::GetPartitionTable( | |||
| size_t cur_ctx_index) { | |||
| ModelPartitionTable *OmFileSaveHelper::GetPartitionTable(size_t cur_ctx_index) { | |||
| auto &cur_ctx = model_contexts_[cur_ctx_index]; | |||
| auto partition_size = static_cast<uint32_t>(cur_ctx.partition_datas_.size()); | |||
| // Build ModelPartitionTable, flex array | |||
| @@ -293,8 +286,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelPartitionTable *OmFileSave | |||
| return partition_table; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileSaveHelper::AddPartition(ModelPartition &partition) { | |||
| Status OmFileSaveHelper::AddPartition(ModelPartition &partition) { | |||
| if (ge::CheckUint32AddOverflow(context_.model_data_len_, partition.size) != SUCCESS) { | |||
| GELOGE(FAILED, "UINT32 %u and %u addition can result in overflow!", context_.model_data_len_, partition.size); | |||
| return FAILED; | |||
| @@ -379,8 +371,8 @@ Status OmFileSaveHelper::SaveModelToFile(const char *output_file, ModelBufferDat | |||
| #endif | |||
| } | |||
| Status OmFileSaveHelper::SaveRootModel(const SaveParam &save_param, const char *output_file, | |||
| ModelBufferData &model, bool is_offline) { | |||
| Status OmFileSaveHelper::SaveRootModel(const SaveParam &save_param, const char *output_file, ModelBufferData &model, | |||
| bool is_offline) { | |||
| (void)save_param.cert_file; | |||
| (void)save_param.ek_file; | |||
| (void)save_param.encode_mode; | |||
| @@ -409,8 +401,8 @@ Status OmFileSaveHelper::SaveRootModel(const SaveParam &save_param, const char * | |||
| model_header_.length += size_of_table + cur_model_data_len; | |||
| model_partition_tabels.push_back(tmp_table); | |||
| all_model_partitions.push_back(cur_ctx.partition_datas_); | |||
| GELOGD("sizeof(ModelPartitionTable):%u, cur_model_data_len:%u, cur_context_index:%zu", | |||
| size_of_table, cur_model_data_len, ctx_index); | |||
| GELOGD("sizeof(ModelPartitionTable):%u, cur_model_data_len:%u, cur_context_index:%zu", size_of_table, | |||
| cur_model_data_len, ctx_index); | |||
| } | |||
| Status ret; | |||
| if (is_offline) { | |||
| @@ -48,7 +48,7 @@ struct KernelStoreItemHead { | |||
| uint32_t bin_len; | |||
| }; | |||
| class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY KernelStore { | |||
| class KernelStore { | |||
| public: | |||
| KernelStore() = default; | |||
| virtual ~KernelStore() = default; | |||
| @@ -14,15 +14,14 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "graph/common/local_context.h" | |||
| #include "common/local_context.h" | |||
| #include "common/ge_inner_error_codes.h" | |||
| #include "common/debug/ge_log.h" | |||
| #include "omg/omg_inner_types.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| namespace ge { | |||
| namespace { | |||
| thread_local OmgContext *omg_context = nullptr; | |||
| thread_local OmeContext *ome_context = nullptr; | |||
| } | |||
| void SetLocalOmgContext(OmgContext &context) { | |||
| @@ -37,4 +36,18 @@ OmgContext &GetLocalOmgContext() { | |||
| return domi::GetContext(); | |||
| } | |||
| } | |||
| void SetLocalOmeContext(OmeContext &context) { | |||
| ome_context = &context; | |||
| } | |||
| OmeContext &GetLocalOmeContext() { | |||
| if (ome_context != nullptr) { | |||
| return *ome_context; | |||
| } | |||
| GELOGW("ome_context is nullptr."); | |||
| static OmeContext context; | |||
| return context; | |||
| } | |||
| } | |||
| @@ -0,0 +1,43 @@ | |||
| /** | |||
| * Copyright 2020 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef GE_GRAPH_COMMON_LOCAL_CONTEXT_H_ | |||
| #define GE_GRAPH_COMMON_LOCAL_CONTEXT_H_ | |||
| #include "framework/omg/omg_inner_types.h" | |||
| namespace ge { | |||
| void SetLocalOmgContext(OmgContext &context); | |||
| OmgContext &GetLocalOmgContext(); | |||
| struct OmeContext { | |||
| bool need_multi_batch = false; | |||
| std::string dynamic_node_type; | |||
| std::vector<NodePtr> data_nodes; | |||
| std::vector<NodePtr> getnext_nosink_nodes; | |||
| std::vector<std::string> dynamic_shape_dims; | |||
| std::vector<std::pair<std::string, std::vector<int64_t>>> user_input_dims; | |||
| std::vector<std::vector<int64_t>> user_real_input_dims; | |||
| }; | |||
| GE_FUNC_VISIBILITY | |||
| void SetLocalOmeContext(OmeContext &context); | |||
| GE_FUNC_VISIBILITY | |||
| OmeContext &GetLocalOmeContext(); | |||
| } // namespace ge | |||
| #endif // GE_GRAPH_COMMON_LOCAL_CONTEXT_H_ | |||
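| A minimal standalone sketch of the thread_local accessor pattern introduced by SetLocalOmeContext/GetLocalOmeContext above: each thread records a pointer to its own context, and a static fallback object is returned when none was set. Context, SetLocalContext and GetLocalContext are illustrative names for this sketch, not GE APIs. | |||
| #include <iostream> | |||
| struct Context {  // illustrative stand-in for OmeContext | |||
|   int value = 0; | |||
| }; | |||
| namespace { | |||
| thread_local Context *g_context = nullptr;  // one pointer per thread | |||
| } | |||
| void SetLocalContext(Context &ctx) { g_context = &ctx; } | |||
| Context &GetLocalContext() { | |||
|   if (g_context != nullptr) { | |||
|     return *g_context; | |||
|   } | |||
|   static Context fallback;  // shared fallback when the thread never set one | |||
|   return fallback; | |||
| } | |||
| int main() { | |||
|   Context ctx; | |||
|   ctx.value = 42; | |||
|   SetLocalContext(ctx); | |||
|   std::cout << GetLocalContext().value << std::endl;  // prints 42 | |||
|   return 0; | |||
| } | |||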
| @@ -14,11 +14,11 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "fp16_math.h" | |||
| #include "common/math/fp16_math.h" | |||
| #include "external/register/register_types.h" | |||
| namespace ge { | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t sqrt(fp16_t fp) { | |||
| fp16_t sqrt(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -29,7 +29,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t sqrt(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t rsqrt(fp16_t fp) { | |||
| fp16_t rsqrt(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -40,7 +40,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t rsqrt(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t rcp(fp16_t fp) { | |||
| fp16_t rcp(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -51,7 +51,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t rcp(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t exp(fp16_t fp) { | |||
| fp16_t exp(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -63,7 +63,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t exp(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t pow2(fp16_t fp) { | |||
| fp16_t pow2(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -75,7 +75,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t pow2(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t pow10(fp16_t fp) { | |||
| fp16_t pow10(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -87,7 +87,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t pow10(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t ln(fp16_t fp) { | |||
| fp16_t ln(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -99,7 +99,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t ln(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t log2(fp16_t fp) { | |||
| fp16_t log2(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -111,7 +111,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t log2(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t log10(fp16_t fp) { | |||
| fp16_t log10(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -123,7 +123,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t log10(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t cos(fp16_t fp) { | |||
| fp16_t cos(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -135,7 +135,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t cos(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t sin(fp16_t fp) { | |||
| fp16_t sin(fp16_t fp) { | |||
| fp16_t ret; | |||
| // Convert half precision float number to double | |||
| double dVal = fp; | |||
| @@ -147,13 +147,13 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t sin(fp16_t fp) { | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t abs(fp16_t fp) { | |||
| fp16_t abs(fp16_t fp) { | |||
| fp16_t ret; | |||
| ret.val = (fp.val & kFp16AbsMax); | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t max(fp16_t fp1, fp16_t fp2) { | |||
| fp16_t max(fp16_t fp1, fp16_t fp2) { | |||
| if (fp1 >= fp2) { | |||
| return fp1; | |||
| } else { | |||
| @@ -161,7 +161,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t max(fp16_t fp1, fp16_t f | |||
| } | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t min(fp16_t fp1, fp16_t fp2) { | |||
| fp16_t min(fp16_t fp1, fp16_t fp2) { | |||
| if (fp1 <= fp2) { | |||
| return fp1; | |||
| } else { | |||
| @@ -14,9 +14,9 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "model/ge_model.h" | |||
| #include "common/model/ge_model.h" | |||
| #include <utility> | |||
| #include "common/debug/log.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "graph/debug/ge_attr_define.h" | |||
| #include "graph/utils/attr_utils.h" | |||
| @@ -26,12 +26,12 @@ | |||
| #include "framework/common/debug/log.h" | |||
| #include "framework/common/fmk_error_codes.h" | |||
| #include "graph/buffer.h" | |||
| #include "graph/graph.h" | |||
| #include "external/graph/graph.h" | |||
| #include "proto/task.pb.h" | |||
| namespace ge { | |||
| const uint32_t INVALID_MODEL_ID = 0xFFFFFFFFUL; | |||
| class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeModel : public AttrHolder { | |||
| class GeModel : public AttrHolder { | |||
| public: | |||
| GeModel(); | |||
| ~GeModel() = default; | |||
| @@ -82,13 +82,13 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeModel : public AttrHolder | |||
| private: | |||
| void Init(); | |||
| ProtoAttrMapHelper attrs_; | |||
| ProtoAttrMapHelper attrs_; /*lint !e148*/ | |||
| Graph graph_; | |||
| std::shared_ptr<domi::ModelTaskDef> task_; | |||
| TBEKernelStore tbe_kernal_store_; | |||
| CustAICPUKernelStore cust_aicpu_kernal_store_; | |||
| Buffer weights_buffer_; | |||
| std::shared_ptr<domi::ModelTaskDef> task_; /*lint !e148*/ | |||
| TBEKernelStore tbe_kernal_store_; /*lint !e148*/ | |||
| CustAICPUKernelStore cust_aicpu_kernal_store_; /*lint !e148*/ | |||
| Buffer weights_buffer_; /*lint !e148*/ | |||
| std::string name_; | |||
| uint32_t version_ = {0}; | |||
| @@ -14,8 +14,9 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "ge_root_model.h" | |||
| #include "common/model/ge_root_model.h" | |||
| #include "graph/debug/ge_attr_define.h" | |||
| namespace ge { | |||
| void GeRootModel::SetSubgraphInstanceNameToModel(string instance_name, GeModelPtr ge_model) { | |||
| subgraph_instance_name_to_model_.insert(std::pair<string, GeModelPtr>(instance_name, ge_model)); | |||
| @@ -15,7 +15,7 @@ | |||
| */ | |||
| #include <map> | |||
| #include "graph/compute_graph.h" | |||
| #include "model/ge_model.h" | |||
| #include "common/model/ge_model.h" | |||
| #ifndef GE_MODEL_GE_ROOT_MODEL_H_ | |||
| #define GE_MODEL_GE_ROOT_MODEL_H_ | |||
| @@ -20,15 +20,13 @@ | |||
| #include <string> | |||
| #include "securec.h" | |||
| #include "common/helper/model_helper.h" | |||
| #include "framework/common/helper/model_helper.h" | |||
| namespace ge { | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelParserBase::ModelParserBase() {} | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelParserBase::~ModelParserBase() {} | |||
| ModelParserBase::ModelParserBase() {} | |||
| ModelParserBase::~ModelParserBase() {} | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFromFile(const char *model_path, | |||
| int32_t priority, | |||
| ge::ModelData &model_data) { | |||
| Status ModelParserBase::LoadFromFile(const char *model_path, int32_t priority, ge::ModelData &model_data) { | |||
| std::string real_path = RealPath(model_path); | |||
| if (real_path.empty()) { | |||
| GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "[Check][Param]Model file path %s is invalid", | |||
| @@ -81,9 +79,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFro | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseModelContent(const ge::ModelData &model, | |||
| uint8_t *&model_data, | |||
| uint32_t &model_len) { | |||
| Status ModelParserBase::ParseModelContent(const ge::ModelData &model, uint8_t *&model_data, uint32_t &model_len) { | |||
| // Parameter validity check | |||
| GE_CHECK_NOTNULL(model.model_data); | |||
| @@ -29,8 +29,7 @@ | |||
| namespace ge { | |||
| const uint32_t kInteval = 2; | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFile(const char *file_path, | |||
| const Json &model) { | |||
| Status ModelSaver::SaveJsonToFile(const char *file_path, const Json &model) { | |||
| Status ret = SUCCESS; | |||
| if (file_path == nullptr || SUCCESS != CheckPath(file_path)) { | |||
| GELOGE(FAILED, "[Check][OutputFile]Failed, file %s", file_path); | |||
| @@ -14,7 +14,7 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "graph/common/omg_util.h" | |||
| #include "common/omg_util.h" | |||
| #include "graph/debug/ge_attr_define.h" | |||
| #include "graph/utils/graph_utils.h" | |||
| @@ -59,8 +59,8 @@ Status SetStreamLabel(const ge::NodePtr &node, const std::string &label) { | |||
| if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_STREAM_LABEL, label)) { | |||
| REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_STREAM_LABEL.c_str(), | |||
| node->GetName().c_str(), node->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_STREAM_LABEL.c_str(), | |||
| node->GetName().c_str(), node->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_STREAM_LABEL.c_str(), node->GetName().c_str(), | |||
| node->GetType().c_str()); | |||
| return FAILED; | |||
| } | |||
| @@ -100,8 +100,8 @@ Status SetActiveLabelList(const ge::NodePtr &node, const std::vector<std::string | |||
| if (!AttrUtils::SetListStr(tmp_desc, ge::ATTR_NAME_ACTIVE_LABEL_LIST, active_label_list)) { | |||
| REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(), | |||
| node->GetName().c_str(), node->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(), | |||
| node->GetName().c_str(), node->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(), node->GetName().c_str(), | |||
| node->GetType().c_str()); | |||
| return FAILED; | |||
| } | |||
| @@ -163,8 +163,8 @@ Status SetOriginalNodeName(const ge::NodePtr &node, const std::string &orig_name | |||
| if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_ORIG_NODE_NAME, orig_name)) { | |||
| REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ORIG_NODE_NAME.c_str(), | |||
| node->GetName().c_str(), node->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ORIG_NODE_NAME.c_str(), | |||
| node->GetName().c_str(), node->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ORIG_NODE_NAME.c_str(), node->GetName().c_str(), | |||
| node->GetType().c_str()); | |||
| return FAILED; | |||
| } | |||
| @@ -207,8 +207,8 @@ Status SetNextIteration(const NodePtr &node, const NodePtr &next) { | |||
| if (!AttrUtils::SetStr(op_desc, ATTR_NAME_NEXT_ITERATION, name)) { | |||
| REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_NEXT_ITERATION.c_str(), | |||
| op_desc->GetName().c_str(), op_desc->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_NEXT_ITERATION.c_str(), | |||
| op_desc->GetName().c_str(), op_desc->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_NEXT_ITERATION.c_str(), op_desc->GetName().c_str(), | |||
| op_desc->GetType().c_str()); | |||
| return FAILED; | |||
| } | |||
| return SUCCESS; | |||
| @@ -274,21 +274,6 @@ bool IsUnknownShapeTensor(const GeTensorDesc &tensor_desc) { | |||
| return false; | |||
| } | |||
| /// | |||
| /// @brief Set Op _force_unknown_shape flag | |||
| /// @param [in] node | |||
| /// @param [in] force_unknown, set attribute if true | |||
| /// @param [in] group_index, condition group index of node. | |||
| /// @return | |||
| /// | |||
| void MarkForceUnknownShape(const NodePtr &node, bool force_unknown, int64_t group_index) { | |||
| if (!force_unknown) { | |||
| return; | |||
| } | |||
| SetControlFlowGroup(node, group_index); | |||
| } | |||
| /// | |||
| /// @brief Set Op _control_flow_group flag | |||
| /// @param [in] node | |||
| @@ -305,8 +290,8 @@ void SetControlFlowGroup(const NodePtr &node, int64_t group) { | |||
| if (!AttrUtils::SetInt(op_desc, ATTR_NAME_CONTROL_FLOW_GROUP, group)) { | |||
| REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_CONTROL_FLOW_GROUP.c_str(), | |||
| node->GetName().c_str(), node->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_CONTROL_FLOW_GROUP.c_str(), | |||
| node->GetName().c_str(), node->GetType().c_str()); | |||
| GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_CONTROL_FLOW_GROUP.c_str(), node->GetName().c_str(), | |||
| node->GetType().c_str()); | |||
| } | |||
| } | |||
| } // namespace ge | |||
| @@ -22,16 +22,15 @@ | |||
| #include <utility> | |||
| #include <vector> | |||
| #include "common/types.h" | |||
| #include "common/util.h" | |||
| #include "framework/common/types.h" | |||
| #include "framework/common/util.h" | |||
| #include "graph/node.h" | |||
| namespace ge { | |||
| namespace { | |||
| const int64_t kBufferPoolMemAlignSize = 512; | |||
| const uint32_t kBufferPoolNodeOutIndex = 0; | |||
| const uint32_t kEventReuseThreshold = 65500; | |||
| } // namespace | |||
| static constexpr int64_t kBufferPoolMemAlignSize = 512; | |||
| static constexpr uint32_t kBufferPoolNodeOutIndex = 0; | |||
| static constexpr uint32_t kEventReuseThreshold = 65500; | |||
| /// | |||
| /// @brief get the Original Type of FrameworkOp | |||
| /// @param [in] node | |||
| @@ -125,15 +124,6 @@ Status GetMemorySize(const NodePtr &node, int64_t &output_size); | |||
| /// | |||
| bool IsUnknownShapeTensor(const GeTensorDesc &tensor_desc); | |||
| /// | |||
| /// @brief Set Op _force_unknown_shape flag | |||
| /// @param [in] node | |||
| /// @param [in] force_unknown, set attribute if true | |||
| /// @param [in] group_index, condition group index of node. | |||
| /// @return | |||
| /// | |||
| void MarkForceUnknownShape(const NodePtr &node, bool force_unknown, int64_t group_index); | |||
| /// | |||
| /// @brief Set Op _control_flow_group flag | |||
| /// @param [in] node | |||
| @@ -17,7 +17,7 @@ | |||
| #include "framework/common/op/attr_value_util.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "framework/common/util.h" | |||
| #include "register/register_types.h" | |||
| #include "external/register/register_types.h" | |||
| namespace ge { | |||
| #define DEFINE_SET_ATTR_VALUE_ONE(ARG_TYPE, FIELD) \ | |||
| @@ -77,37 +77,33 @@ DEFINE_SET_ATTR_VALUE_LIST(const std::string &, s); | |||
| } \ | |||
| } while (0); | |||
| #define DEFINE_ADD_ATTR_VALUE(KEY_TYPE, VALUE_TYPE) \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddOpAttr(KEY_TYPE map_key, VALUE_TYPE value, OpDef *op_def) { \ | |||
| GE_CHECK_NOTNULL_JUST_RETURN(op_def); \ | |||
| auto attr = op_def->mutable_attr(); \ | |||
| ADD_TO_ATTR_MAP(map_key, value, attr) \ | |||
| } \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddOpAttr(KEY_TYPE map_key, VALUE_TYPE value, \ | |||
| AttrDefMap *attr_map) { \ | |||
| ADD_TO_ATTR_MAP(map_key, value, attr_map) \ | |||
| } \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddModelAttr(KEY_TYPE map_key, VALUE_TYPE value, \ | |||
| ModelDef *model_def) { \ | |||
| GE_CHECK_NOTNULL_JUST_RETURN(model_def); \ | |||
| auto attr = model_def->mutable_attr(); \ | |||
| ADD_TO_ATTR_MAP(map_key, value, attr) \ | |||
| #define DEFINE_ADD_ATTR_VALUE(KEY_TYPE, VALUE_TYPE) \ | |||
| void AddOpAttr(KEY_TYPE map_key, VALUE_TYPE value, OpDef *op_def) { \ | |||
| GE_CHECK_NOTNULL_JUST_RETURN(op_def); \ | |||
| auto attr = op_def->mutable_attr(); \ | |||
| ADD_TO_ATTR_MAP(map_key, value, attr) \ | |||
| } \ | |||
| void AddOpAttr(KEY_TYPE map_key, VALUE_TYPE value, AttrDefMap *attr_map) { \ | |||
| ADD_TO_ATTR_MAP(map_key, value, attr_map) \ | |||
| } \ | |||
| void AddModelAttr(KEY_TYPE map_key, VALUE_TYPE value, ModelDef *model_def) { \ | |||
| GE_CHECK_NOTNULL_JUST_RETURN(model_def); \ | |||
| auto attr = model_def->mutable_attr(); \ | |||
| ADD_TO_ATTR_MAP(map_key, value, attr) \ | |||
| } | |||
| #define DEFINE_ADD_ATTR_VALUE_LIST(KEY_TYPE, VALUE_TYPE) \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddOpAttrList(KEY_TYPE map_key, VALUE_TYPE value, \ | |||
| OpDef *op_def) { \ | |||
| GE_CHECK_NOTNULL_JUST_RETURN(op_def); \ | |||
| auto attr = op_def->mutable_attr(); \ | |||
| ADD_TO_ATTR_MAP_LIST(map_key, value, attr) \ | |||
| } \ | |||
| FMK_FUNC_DEV_VISIBILITY void AddOpAttrList(KEY_TYPE map_key, VALUE_TYPE value, AttrDefMap *attr_map) { \ | |||
| ADD_TO_ATTR_MAP_LIST(map_key, value, attr_map) \ | |||
| } \ | |||
| FMK_FUNC_DEV_VISIBILITY void AddModelAttrList(KEY_TYPE map_key, VALUE_TYPE value, ModelDef *model_def) { \ | |||
| GE_CHECK_NOTNULL_JUST_RETURN(model_def); \ | |||
| auto attr = model_def->mutable_attr(); \ | |||
| ADD_TO_ATTR_MAP_LIST(map_key, value, attr) \ | |||
| #define DEFINE_ADD_ATTR_VALUE_LIST(KEY_TYPE, VALUE_TYPE) \ | |||
| void AddOpAttrList(KEY_TYPE map_key, VALUE_TYPE value, OpDef *op_def) { \ | |||
| GE_CHECK_NOTNULL_JUST_RETURN(op_def); \ | |||
| auto attr = op_def->mutable_attr(); \ | |||
| ADD_TO_ATTR_MAP_LIST(map_key, value, attr) \ | |||
| } \ | |||
| void AddOpAttrList(KEY_TYPE map_key, VALUE_TYPE value, AttrDefMap *attr_map) { \ | |||
| ADD_TO_ATTR_MAP_LIST(map_key, value, attr_map) \ | |||
| } \ | |||
| FMK_FUNC_DEV_VISIBILITY void AddModelAttrList(KEY_TYPE map_key, VALUE_TYPE value, ModelDef *model_def) { \ | |||
| GE_CHECK_NOTNULL_JUST_RETURN(model_def); \ | |||
| auto attr = model_def->mutable_attr(); \ | |||
| ADD_TO_ATTR_MAP_LIST(map_key, value, attr) \ | |||
| } | |||
| DEFINE_ADD_ATTR_VALUE(const std::string &, const std::string &); | |||
| @@ -127,46 +123,42 @@ DEFINE_ADD_ATTR_VALUE_LIST(const std::string &, const bool); | |||
| DEFINE_ADD_ATTR_VALUE_LIST(const std::string &, const int64_t); | |||
| DEFINE_ADD_ATTR_VALUE_LIST(const std::string &, const std::string &); | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddOpAttr(const std::string &map_key, AttrDef &attr, | |||
| OpDef *op_def) { | |||
| void AddOpAttr(const std::string &map_key, AttrDef &attr, OpDef *op_def) { | |||
| GE_CHECK_NOTNULL_JUST_RETURN(op_def); | |||
| GE_CHECK_NOTNULL_JUST_RETURN(op_def->mutable_attr()); | |||
| (void)op_def->mutable_attr()->insert(AttrDefPair(map_key, attr)); | |||
| } | |||
| #define DEFINE_GET_ATTR_VALUE(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetAttrDefValue(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, \ | |||
| const AttrDefMap &attr) { \ | |||
| auto it = attr.find(map_key); \ | |||
| if (it != attr.end()) { \ | |||
| *value = it->second.FIELD(); \ | |||
| return true; \ | |||
| } \ | |||
| return false; \ | |||
| #define DEFINE_GET_ATTR_VALUE(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ | |||
| bool GetAttrDefValue(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, const AttrDefMap &attr) { \ | |||
| auto it = attr.find(map_key); \ | |||
| if (it != attr.end()) { \ | |||
| *value = it->second.FIELD(); \ | |||
| return true; \ | |||
| } \ | |||
| return false; \ | |||
| } | |||
| #define DEFINE_GET_ATTR_POINT_REF(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetAttrDefValue(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE *&value, \ | |||
| AttrDefMap *attr) { \ | |||
| GE_RT_FALSE_CHECK_NOTNULL(attr); \ | |||
| auto it = attr->find(map_key); \ | |||
| if (it != attr->end()) { \ | |||
| value = it->second.mutable_##FIELD(); \ | |||
| return true; \ | |||
| } \ | |||
| return false; \ | |||
| #define DEFINE_GET_ATTR_POINT_REF(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ | |||
| bool GetAttrDefValue(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE *&value, AttrDefMap *attr) { \ | |||
| GE_RT_FALSE_CHECK_NOTNULL(attr); \ | |||
| auto it = attr->find(map_key); \ | |||
| if (it != attr->end()) { \ | |||
| value = it->second.mutable_##FIELD(); \ | |||
| return true; \ | |||
| } \ | |||
| return false; \ | |||
| } | |||
| #define DEFINE_GET_ATTR_CONST_POINT_REF(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetAttrDefValue( \ | |||
| ARG_TYPE_KEY map_key, const ARG_TYPE_VALUE *&value, const AttrDefMap &attr) { \ | |||
| auto it = attr.find(map_key); \ | |||
| if (it == attr.end()) { \ | |||
| return false; \ | |||
| } \ | |||
| \ | |||
| value = &(it->second.FIELD()); \ | |||
| return true; \ | |||
| #define DEFINE_GET_ATTR_CONST_POINT_REF(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ | |||
| bool GetAttrDefValue(ARG_TYPE_KEY map_key, const ARG_TYPE_VALUE *&value, const AttrDefMap &attr) { \ | |||
| auto it = attr.find(map_key); \ | |||
| if (it == attr.end()) { \ | |||
| return false; \ | |||
| } \ | |||
| \ | |||
| value = &(it->second.FIELD()); \ | |||
| return true; \ | |||
| } | |||
| #define DEFINE_GET_BYTES_ATTR_VALUE(ARG_TYPE_KEY, ARG_TYPE_VALUE) \ | |||
| @@ -216,16 +208,14 @@ DEFINE_GET_ATTR_CONST_POINT_REF(const std::string &, NamedAttrs, func); | |||
| DEFINE_GET_BYTES_ATTR_VALUE(const std::string &, std::string *); | |||
| #define DEFINE_GET_OP_ATTR(ARG_TYPE_KEY, ARG_TYPE_VALUE) \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetOpAttr(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, \ | |||
| const OpDef *op_def) { \ | |||
| GE_RT_FALSE_CHECK_NOTNULL(op_def); \ | |||
| return GetAttrDefValue(map_key, value, op_def->attr()); \ | |||
| } \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetModelAttr(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, \ | |||
| const ModelDef *model_def) { \ | |||
| GE_RT_FALSE_CHECK_NOTNULL(model_def); \ | |||
| return GetAttrDefValue(map_key, value, model_def->attr()); \ | |||
| #define DEFINE_GET_OP_ATTR(ARG_TYPE_KEY, ARG_TYPE_VALUE) \ | |||
| bool GetOpAttr(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, const OpDef *op_def) { \ | |||
| GE_RT_FALSE_CHECK_NOTNULL(op_def); \ | |||
| return GetAttrDefValue(map_key, value, op_def->attr()); \ | |||
| } \ | |||
| bool GetModelAttr(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, const ModelDef *model_def) { \ | |||
| GE_RT_FALSE_CHECK_NOTNULL(model_def); \ | |||
| return GetAttrDefValue(map_key, value, model_def->attr()); \ | |||
| } | |||
| DEFINE_GET_OP_ATTR(const std::string &, std::string *); | |||
| @@ -238,8 +228,7 @@ DEFINE_GET_OP_ATTR(const std::string &, bool *); | |||
| DEFINE_GET_OP_ATTR(const std::string &, AttrDef_ListValue *); | |||
| #define DEFINE_GET_BT_ATTR(ARG_TYPE_KEY, ARG_TYPE_VALUE) \ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetBytesAttr(ARG_TYPE_KEY key, ARG_TYPE_VALUE value, \ | |||
| const OpDef *op_def) { \ | |||
| bool GetBytesAttr(ARG_TYPE_KEY key, ARG_TYPE_VALUE value, const OpDef *op_def) { \ | |||
| GE_RT_FALSE_CHECK_NOTNULL(op_def); \ | |||
| return GetBytesValue(key, value, op_def->attr()); \ | |||
| } \ | |||
| @@ -250,7 +239,7 @@ DEFINE_GET_OP_ATTR(const std::string &, AttrDef_ListValue *); | |||
| DEFINE_GET_BT_ATTR(const std::string &, std::string *); | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool HasOpAttr(const OpDef *op_def, const std::string &attr_name) { | |||
| bool HasOpAttr(const OpDef *op_def, const std::string &attr_name) { | |||
| if (op_def == nullptr) { | |||
| return false; | |||
| } | |||
| @@ -263,8 +252,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool HasOpAttr(const OpDef *op_ | |||
| return false; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddModelAttr(const std::string &map_key, const void *value, | |||
| size_t size, ModelDef *model_def) { | |||
| void AddModelAttr(const std::string &map_key, const void *value, size_t size, ModelDef *model_def) { | |||
| if (model_def == nullptr) { | |||
| return; | |||
| } | |||
| @@ -280,8 +268,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddModelAttr(const std::st | |||
| } | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddOpBytesAttr(const std::string &key, const void *value, | |||
| size_t size, OpDef *op_def) { | |||
| void AddOpBytesAttr(const std::string &key, const void *value, size_t size, OpDef *op_def) { | |||
| if (op_def == nullptr) { | |||
| return; | |||
| } | |||
| @@ -115,8 +115,7 @@ const int NORMAL_TENSOR_SIZE = 4; | |||
| #define AIPP_CONVERT_LIST_FLOAT(KEY, REQUIRED) AIPP_CONVERT_LIST_FORMAT(KEY, float, REQUIRED, GeAttrValue::FLOAT) | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status | |||
| OpUtils::ConvertAippParams(const GeAttrValue::NAMED_ATTRS &aipp_attr, domi::AippOpParams *aipp_params) { | |||
| Status OpUtils::ConvertAippParams(const GeAttrValue::NAMED_ATTRS &aipp_attr, domi::AippOpParams *aipp_params) { | |||
| GE_CHECK_NOTNULL(aipp_params); | |||
| AIPP_CONVERT_FORMAT_EX(aipp_mode, domi::AippOpParams::AippMode, int32_t, GeAttrValue::INT); | |||
| AIPP_CONVERT_INT(related_input_rank); | |||
| @@ -178,8 +177,7 @@ OpUtils::ConvertAippParams(const GeAttrValue::NAMED_ATTRS &aipp_attr, domi::Aipp | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::TransferDim(const std::vector<int64_t> &dim, | |||
| std::vector<int64_t> &dim_vector) { | |||
| Status OpUtils::TransferDim(const std::vector<int64_t> &dim, std::vector<int64_t> &dim_vector) { | |||
| size_t input_shape_size = dim.size(); | |||
| std::list<uint32_t> new_dim_list; | |||
| for (auto dim_temp : dim) { | |||
| @@ -301,9 +299,9 @@ Status OpUtils::SetOutputSliceDataByDataType(void *data, int64_t data_size, cons | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetOutputSliceData( | |||
| void *data, int64_t data_size, int32_t data_type, std::vector<int64_t> &input_dims, std::vector<int64_t> &begin, | |||
| std::vector<int64_t> &output_dims, GeTensor *output, std::vector<int64_t> &stride) { | |||
| Status OpUtils::SetOutputSliceData(void *data, int64_t data_size, int32_t data_type, std::vector<int64_t> &input_dims, | |||
| std::vector<int64_t> &begin, std::vector<int64_t> &output_dims, GeTensor *output, | |||
| std::vector<int64_t> &stride) { | |||
| if (data == nullptr || output == nullptr) { | |||
| GELOGE(PARAM_INVALID, "[Check][Param]Input param is nullptr"); | |||
| REPORT_INNER_ERROR("E19999", "Input param is nullptr"); | |||
| @@ -352,9 +350,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetOutputSliceD | |||
| return ret; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void OpUtils::TransDataHWCK2KCHW(const void *input, int64_t h, | |||
| int64_t w, int64_t c, int64_t k, | |||
| void **output) { | |||
| void OpUtils::TransDataHWCK2KCHW(const void *input, int64_t h, int64_t w, int64_t c, int64_t k, void **output) { | |||
| if (input == nullptr) { | |||
| return; | |||
| } | |||
| @@ -386,9 +382,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void OpUtils::TransDataHWCK2KCH | |||
| *output = buf; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void OpUtils::TransDataKCHW2HWCK(const void *input, int64_t k, | |||
| int64_t c, int64_t h, int64_t w, | |||
| void *output) { | |||
| void OpUtils::TransDataKCHW2HWCK(const void *input, int64_t k, int64_t c, int64_t h, int64_t w, void *output) { | |||
| if ((input == nullptr) || (output == nullptr)) { | |||
| GELOGD("%s[%d]: input param is nullptr.", __FILE__, __LINE__); | |||
| return; | |||
| @@ -417,31 +411,22 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void OpUtils::TransDataKCHW2HWC | |||
| vector<ConstGeTensorPtr> OpUtils::GetWeights(const ge::Node &node) { return OpDescUtils::GetWeights(node); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY vector<ConstGeTensorPtr> OpUtils::GetWeights(ge::ConstNodePtr node) { | |||
| return OpDescUtils::GetWeights(node); | |||
| } | |||
| vector<ConstGeTensorPtr> OpUtils::GetWeights(ge::ConstNodePtr node) { return OpDescUtils::GetWeights(node); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY vector<GeTensorPtr> OpUtils::MutableWeights(const ge::Node &node) { | |||
| return OpDescUtils::MutableWeights(node); | |||
| } | |||
| vector<GeTensorPtr> OpUtils::MutableWeights(const ge::Node &node) { return OpDescUtils::MutableWeights(node); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY vector<GeTensorPtr> OpUtils::MutableWeights(const ge::NodePtr node) { | |||
| return OpDescUtils::MutableWeights(node); | |||
| } | |||
| vector<GeTensorPtr> OpUtils::MutableWeights(const ge::NodePtr node) { return OpDescUtils::MutableWeights(node); } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetWeights(ge::Node &node, | |||
| const vector<ge::GeTensorPtr> &weights) { | |||
| Status OpUtils::SetWeights(ge::Node &node, const vector<ge::GeTensorPtr> &weights) { | |||
| return OpDescUtils::SetWeights(node, weights); | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetWeights(ge::NodePtr node, | |||
| const vector<ge::GeTensorPtr> &weights) { | |||
| Status OpUtils::SetWeights(ge::NodePtr node, const vector<ge::GeTensorPtr> &weights) { | |||
| return OpDescUtils::SetWeights(node, weights); | |||
| } | |||
| // The caller guarantees that the input tensor is constant | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status | |||
| OpUtils::GetShapeDataFromConstTensor(const ConstGeTensorPtr &tensor, DataType type, std::vector<int64_t> &dims) { | |||
| Status OpUtils::GetShapeDataFromConstTensor(const ConstGeTensorPtr &tensor, DataType type, std::vector<int64_t> &dims) { | |||
| if (tensor == nullptr) { | |||
| GELOGE(PARAM_INVALID, "[Check][Param]Input tensor is nullptr"); | |||
| REPORT_INNER_ERROR("E19999", "Input tensor is nullptr"); | |||
| @@ -14,14 +14,17 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "common/profiling/ge_profiling.h" | |||
| #include "framework/common/profiling/ge_profiling.h" | |||
| #include "runtime/base.h" | |||
| #include "common/profiling/profiling_manager.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "graph/load/graph_loader.h" | |||
| #include "graph/ge_context.h" | |||
| #include "init/gelib.h" | |||
| #include "framework/common/ge_inner_error_codes.h" | |||
| #include "common/model/ge_model.h" | |||
| #include "framework/omg/omg_inner_types.h" | |||
| namespace { | |||
| const uint32_t kDeviceListIndex = 3; | |||
| @@ -34,6 +37,7 @@ const std::string kProfilingStop = "prof_stop"; | |||
| const std::string kProfModelSubscribe = "prof_model_subscribe"; | |||
| const std::string kProfModelUnsubscribe = "prof_model_cancel_subscribe"; | |||
| const std::string kRtSetDeviceRegName = "profiling"; | |||
| const std::string kPofilingModelId = "modelId"; | |||
| const std::map<ProfCommandHandleType, std::string> kProfCommandTypeMap = { | |||
| {kProfCommandhandleInit, kProfilingInit}, | |||
| @@ -42,6 +46,26 @@ const std::map<ProfCommandHandleType, std::string> kProfCommandTypeMap = { | |||
| {kProfCommandhandleFinalize, kProfilingFinalize}, | |||
| {kProfCommandhandleModelSubscribe, kProfModelSubscribe}, | |||
| {kProfCommandhandleModelUnsubscribe, kProfModelUnsubscribe}}; | |||
| const uint64_t kModelId = ge::INVALID_MODEL_ID; | |||
| const uint16_t kStepStart = 0; | |||
| const uint16_t kStepEnd = 1; | |||
| ge::Status NeedUnsubscribe(ProfCommandHandleType type, bool is_subscribe, | |||
| uint32_t graph_id, vector<string> &prof_params) { | |||
| if (type == kProfCommandhandleModelUnsubscribe && is_subscribe) { | |||
| prof_params.clear(); | |||
| prof_params.emplace_back(kPofilingModelId); | |||
| uint32_t model_id = 0; | |||
| auto ret = ge::ProfilingManager::Instance().GetModelIdFromGraph(graph_id, model_id); | |||
| if (ret != ge::SUCCESS) { | |||
| GELOGE(ret, "graph_id:%u not not found", graph_id); | |||
| return ret; | |||
| } | |||
| prof_params.emplace_back(std::to_string(model_id)); | |||
| } | |||
| return ge::SUCCESS; | |||
| } | |||
| } // namespace | |||
| bool TransProfConfigToParam(const ProfCommandHandleData &profCommand, vector<string> &prof_config_params) { | |||
| @@ -190,6 +214,24 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le | |||
| return ge::PARAM_INVALID; | |||
| } | |||
| } | |||
| auto &profiling_manager = ge::ProfilingManager::Instance(); | |||
| auto is_train = domi::GetContext().train_flag; | |||
| if (type == kProfCommandhandleModelSubscribe && is_train) { | |||
| profiling_manager.SetSubscribeInfo(prof_config_param->profSwitch, prof_config_param->modelId, true); | |||
| return ge::SUCCESS; | |||
| } | |||
| auto is_subscribe = profiling_manager.GetSubscribeInfo().is_subscribe; | |||
| // GraphId is actually stored in prof_config_param | |||
| auto graph_id = prof_config_param->modelId; | |||
| ge::Status ret = NeedUnsubscribe(type, is_subscribe, graph_id, prof_params); | |||
| if (ret != ge::SUCCESS) { | |||
| GELOGE(ret, "graph_id:%u not not found", graph_id); | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"value", "parameter", "reason"}), | |||
| std::vector<std::string>({std::to_string(graph_id), | |||
| "GraphToModelMap", | |||
| "graph_id does not exist!"})); | |||
| return ge::FAILED; | |||
| } | |||
| ge::GraphLoader graph_loader; | |||
| ge::Command command; | |||
| command.cmd_params.clear(); | |||
| @@ -203,7 +245,7 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le | |||
| if (type == kProfCommandhandleStart || type == kProfCommandhandleStop) { | |||
| GELOGI("Profiling device nums:%s , deviceID:[%s]", prof_params[0].c_str(), prof_params[kDeviceListIndex].c_str()); | |||
| } | |||
| ge::Status ret = graph_loader.CommandHandle(command); | |||
| ret = graph_loader.CommandHandle(command); | |||
| if (ret != ge::SUCCESS) { | |||
| GELOGE(ret, "[Handle][Command]Handle profiling command failed, command type %s, error_code %u", | |||
| iter->second.c_str(), ret); | |||
| @@ -216,6 +258,34 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le | |||
| return ge::SUCCESS; | |||
| } | |||
| GE_FUNC_VISIBILITY ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream) { | |||
| return ge::SUCCESS; | |||
| ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream) { | |||
| static bool is_first_run = true; | |||
| int32_t device_id = 0; | |||
| rtError_t rt_ret = rtGetDevice(&device_id); | |||
| if (rt_ret != RT_ERROR_NONE) { | |||
| GELOGE(rt_ret, "[Get][LogicDeviceId]Failed, ret 0x%X", rt_ret); | |||
| REPORT_CALL_ERROR("E19999", "Get logic device id failed, ret 0x%X", rt_ret); | |||
| return ge::FAILED; | |||
| } | |||
| auto &profiling_manager = ge::ProfilingManager::Instance(); | |||
| profiling_manager.SetStepInfoIndex(index_id); | |||
| if (is_first_run && tag_id == kStepStart) { | |||
| GE_CHK_STATUS_RET_NOLOG(profiling_manager.ProfileStepInfo(index_id, kModelId, tag_id, stream, device_id)); | |||
| is_first_run = false; | |||
| return ge::SUCCESS; | |||
| } | |||
| if (!is_first_run && tag_id == kStepEnd) { | |||
| GE_CHK_STATUS_RET_NOLOG(profiling_manager.ProfileStepInfo(index_id, kModelId, tag_id, stream, device_id)); | |||
| is_first_run = true; | |||
| return ge::SUCCESS; | |||
| } | |||
| GELOGE(ge::FAILED, "Param tag_id:%u invalid when is_first_run is %d", tag_id, is_first_run); | |||
| REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"value", "parameter", "reason"}), | |||
| std::vector<std::string>({std::to_string(tag_id), "tag_id", | |||
| "tag id must be 0 when first run, must be 1 when second run"})); | |||
| return ge::FAILED; | |||
| } | |||
| ge::Status ProfGetDeviceFormGraphId(uint32_t graph_id, uint32_t &device_id) { | |||
| return ge::ProfilingManager::Instance().GetDeviceIdFromGraph(graph_id, device_id); | |||
| } | |||
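| A minimal standalone sketch of the start/end pairing that ProfSetStepInfo enforces above: tag 0 (step start) is only accepted when no step is open, and tag 1 (step end) only when one is, tracked by a static flag. RecordStepTag is an illustrative name for this sketch, not a GE API. | |||
| #include <cstdint> | |||
| #include <iostream> | |||
| // Accept tag 0 only when a new step may start, tag 1 only when a step is open. | |||
| bool RecordStepTag(uint16_t tag_id) { | |||
|   static bool expecting_start = true; | |||
|   if (expecting_start && tag_id == 0) {   // step start | |||
|     expecting_start = false; | |||
|     return true; | |||
|   } | |||
|   if (!expecting_start && tag_id == 1) {  // step end | |||
|     expecting_start = true; | |||
|     return true; | |||
|   } | |||
|   return false;  // unpaired start/end is rejected | |||
| } | |||
| int main() { | |||
|   std::cout << RecordStepTag(0) << "\n";  // paired start   -> 1 | |||
|   std::cout << RecordStepTag(1) << "\n";  // matching end   -> 1 | |||
|   std::cout << RecordStepTag(1) << "\n";  // unpaired end   -> 0 | |||
|   return 0; | |||
| } | |||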
| @@ -14,7 +14,7 @@ | |||
| * limitations under the License. | |||
| */ | |||
| #include "common/profiling/ge_runner_profiling.h" | |||
| #include "framework/common/profiling/ge_runner_profiling.h" | |||
| #include "init/gelib.h" | |||
| bool IsInitialize() { | |||
| @@ -21,7 +21,7 @@ | |||
| #include "framework/common/string_util.h" | |||
| #include "graph/ge_context.h" | |||
| #include "graph/utils/type_utils.h" | |||
| #include "graph/types.h" | |||
| #include "external/graph/types.h" | |||
| #include "runtime/base.h" | |||
| #include "graph/load/model_manager/davinci_model.h" | |||
| #include "mmpa/mmpa_api.h" | |||
| @@ -66,19 +66,23 @@ const std::string kIdx = "idx"; | |||
| namespace ge { | |||
| ProfilingManager::ProfilingManager() | |||
| : is_load_profiling_(false), is_execute_profiling_(false), is_training_trace_(false), subscribe_count_(0) { | |||
| prof_cb_.msprofCtrlCallback = nullptr; | |||
| prof_cb_.msprofReporterCallback = nullptr; | |||
| : is_load_profiling_(false), | |||
| is_execute_profiling_(false), | |||
| is_training_trace_(false), | |||
| subscribe_count_(0), | |||
| prof_cb_({nullptr, nullptr}), | |||
| index_id_(UINT64_MAX), | |||
| subscribe_info_({false, 0, 0}) { | |||
| } | |||
| ProfilingManager::~ProfilingManager() {} | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager &ProfilingManager::Instance() { | |||
| ProfilingManager &ProfilingManager::Instance() { | |||
| static ProfilingManager profiling_manager; | |||
| return profiling_manager; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ge::Status ProfilingManager::Init(const Options &options) { | |||
| ge::Status ProfilingManager::Init(const Options &options) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| vector<int32_t>().swap(device_id_); | |||
| subscribe_count_ = 0; | |||
| @@ -217,7 +221,7 @@ ge::Status ProfilingManager::ParseOptions(const std::string &options) { | |||
| return ge::SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::StopProfiling() { | |||
| void ProfilingManager::StopProfiling() { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| uint64_t module = GetProfilingModule(); | |||
| // The following if case will not be executed in normal case, inc case of ProfStopProfiling is abnormal | |||
| @@ -255,8 +259,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::StopProf | |||
| #endif | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ProfilingOpInputOutInfo( | |||
| const TaskDescInfo &task, Json &task_json) { | |||
| void ProfilingManager::ProfilingOpInputOutInfo(const TaskDescInfo &task, Json &task_json) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| for (size_t i = 0; i < task.input_format.size(); i++) { | |||
| Json tmp_input; | |||
| @@ -282,8 +285,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::Profilin | |||
| #endif | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ProfilingTaskDescInfo( | |||
| uint32_t model_id, const std::vector<TaskDescInfo> &task_desc_info, const int32_t &device_id) { | |||
| void ProfilingManager::ProfilingTaskDescInfo(uint32_t model_id, const std::vector<TaskDescInfo> &task_desc_info, | |||
| const int32_t &device_id) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| for (const auto &task : task_desc_info) { | |||
| Json task_info; | |||
| @@ -320,8 +323,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::Profilin | |||
| #endif | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfileStepInfo( | |||
| uint64_t index_id, uint64_t model_id, uint16_t tag_id, rtStream_t stream, int32_t device_id) { | |||
| Status ProfilingManager::ProfileStepInfo(uint64_t index_id, uint64_t model_id, uint16_t tag_id, rtStream_t stream, | |||
| int32_t device_id) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| if (!is_load_profiling_ && subscribe_count_ == 0) { | |||
| GELOGD("Profiling is not turned on, no need to profile step info."); | |||
| @@ -381,8 +384,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Profil | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportData( | |||
| const int32_t &device_id, const string &data, const string &tag_name) { | |||
| void ProfilingManager::ReportData(const int32_t &device_id, const string &data, const string &tag_name) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| ReporterData reporter_data{}; | |||
| int ret = -1; | |||
| @@ -422,8 +424,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportDa | |||
| #endif | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportProfilingData( | |||
| uint32_t model_id, const std::vector<TaskDescInfo> &task_desc_info) { | |||
| void ProfilingManager::ReportProfilingData(uint32_t model_id, const std::vector<TaskDescInfo> &task_desc_info) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| int32_t logic_device_id = 0; | |||
| rtError_t rt_ret = rtGetDevice(&logic_device_id); | |||
| @@ -439,7 +440,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportPr | |||
| #endif | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint64_t ProfilingManager::GetProfilingModule() { | |||
| uint64_t ProfilingManager::GetProfilingModule() { | |||
| uint64_t module = PROF_MODEL_EXECUTE_MASK | | |||
| PROF_RUNTIME_API_MASK | | |||
| PROF_RUNTIME_TRACE_MASK | | |||
| @@ -481,8 +482,7 @@ void ProfilingManager::UpdateSubscribeDeviceModuleMap(std::string prof_type, uin | |||
| #endif | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfModelSubscribe( | |||
| uint64_t module, void *model) { | |||
| Status ProfilingManager::ProfModelSubscribe(uint64_t module, void *model) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| uint64_t model_load_mask = module & PROF_MODEL_LOAD_MASK; | |||
| @@ -522,8 +522,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfMo | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfModelUnsubscribe( | |||
| void *model) { | |||
| Status ProfilingManager::ProfModelUnsubscribe(void *model) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| if (subscribe_count_ == 0) { | |||
| @@ -564,7 +563,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfMo | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfInit(uint64_t module) { | |||
| Status ProfilingManager::ProfInit(uint64_t module) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| uint64_t model_load_mask = module & PROF_MODEL_LOAD_MASK; | |||
| @@ -598,16 +597,19 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfIn | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfFinalize() { | |||
| Status ProfilingManager::ProfFinalize() { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| is_load_profiling_ = false; | |||
| is_training_trace_ = false; | |||
| is_execute_profiling_ = false; | |||
| index_id_ = UINT64_MAX; | |||
| // profiling plugin uninit | |||
| PluginUnInit(); | |||
| CleanSubscribeInfo(); | |||
| int32_t dev_num = -1; | |||
| rtError_t rt_ret = rtProfilerStop(PROF_MODEL_LOAD_MASK, dev_num, nullptr); | |||
| if (rt_ret != RT_ERROR_NONE) { | |||
| @@ -630,6 +632,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfFi | |||
| } | |||
| device_id_module_map_.clear(); | |||
| device_id_.clear(); | |||
| device_id_map_.clear(); | |||
| model_id_map_.clear(); | |||
| GELOGI("Prof finalize success."); | |||
| #endif | |||
| return SUCCESS; | |||
| @@ -688,8 +692,8 @@ Status ProfilingManager::ProfParseDeviceId(const std::map<std::string, std::stri | |||
| return SUCCESS; | |||
| } | |||
| Status ProfilingManager::ProfParseParam(const std::map<std::string, std::string> &config_para, | |||
| int32_t &device_num, vector<int32_t> &device_list) { | |||
| Status ProfilingManager::ProfParseParam(const std::map<std::string, std::string> &config_para, int32_t &device_num, | |||
| vector<int32_t> &device_list) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| // device num | |||
| auto iter = config_para.find(kConfigNumsdev); | |||
| @@ -738,8 +742,7 @@ Status ProfilingManager::ProfParseParam(const std::map<std::string, std::string> | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfStartProfiling( | |||
| uint64_t module, const std::map<std::string, std::string> &config_para) { | |||
| Status ProfilingManager::ProfStartProfiling(uint64_t module, const std::map<std::string, std::string> &config_para) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| uint64_t training_trace_mask = module & PROF_TRAINING_TRACE_MASK; | |||
| @@ -794,8 +797,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfStopProfiling(uint64_t module, | |||
| const std::map<std::string, std::string> &config_para) { | |||
| Status ProfilingManager::ProfStopProfiling(uint64_t module, const std::map<std::string, std::string> &config_para) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| int32_t device_num = 0; | |||
| @@ -846,8 +848,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::UpdateDeviceIdModuleMap(string prof_type, | |||
| uint64_t module, const vector<int32_t> &device_list) { | |||
| void ProfilingManager::UpdateDeviceIdModuleMap(string prof_type, uint64_t module, const vector<int32_t> &device_list) { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| if (prof_type == kProfStart) { | |||
| for (uint32_t i = 0; i < device_list.size(); i++) { | |||
| @@ -877,7 +878,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::UpdateDe | |||
| #endif | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ProfilingManager::ProfilingModelExecuteOn() const { | |||
| bool ProfilingManager::ProfilingModelExecuteOn() const { | |||
| int32_t logic_device_id = 0; | |||
| rtError_t rt_ret = rtGetDevice(&logic_device_id); | |||
| if (rt_ret != RT_ERROR_NONE) { | |||
| @@ -895,7 +896,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ProfilingManager::Profilin | |||
| return execute_model_prof_on; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::PluginInit() { | |||
| Status ProfilingManager::PluginInit() { | |||
| if (prof_cb_.msprofReporterCallback == nullptr) { | |||
| GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr"); | |||
| REPORT_INNER_ERROR("E19999", "MsprofReporterCallback callback is nullptr"); | |||
| @@ -924,7 +925,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Plugin | |||
| return SUCCESS; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::PluginUnInit() const { | |||
| void ProfilingManager::PluginUnInit() const { | |||
| #ifdef DAVINCI_SUPPORT_PROFILING | |||
| if (prof_cb_.msprofReporterCallback == nullptr) { | |||
| GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr"); | |||
| @@ -941,8 +942,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::PluginUn | |||
| #endif | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::CallMsprofReport( | |||
| ReporterData &reporter_data) const { | |||
| Status ProfilingManager::CallMsprofReport(ReporterData &reporter_data) const { | |||
| if (prof_cb_.msprofReporterCallback == nullptr) { | |||
| GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr"); | |||
| REPORT_INNER_ERROR("E19999", "MsprofReporterCallback callback is nullptr"); | |||
| @@ -998,14 +998,12 @@ void ProfilingManager::GetOpOutputInfo(const OpDescPtr &op, TaskDescInfo &task_d | |||
| task_desc_info.output_data_type = output_data_type.empty() ? data_type_default : output_data_type; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::GetOpInputOutputInfo( | |||
| const OpDescPtr &op, TaskDescInfo &task_desc_info) const { | |||
| void ProfilingManager::GetOpInputOutputInfo(const OpDescPtr &op, TaskDescInfo &task_desc_info) const { | |||
| GetOpInputInfo(op, task_desc_info); | |||
| GetOpOutputInfo(op, task_desc_info); | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::GetFpBpPoint( | |||
| std::string &fp_point, std::string &bp_point) { | |||
| void ProfilingManager::GetFpBpPoint(std::string &fp_point, std::string &bp_point) { | |||
| // Env or options mode, fp_point_/bp_point_ have been initialized during profiling init | |||
| if (!fp_point_.empty() && !bp_point_.empty()) { | |||
| fp_point = fp_point_; | |||
| @@ -1016,7 +1014,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::GetFpBpP | |||
| } | |||
| // ProfApi mode and training trace is set | |||
| // Parse options first | |||
| char env_profiling_options[MSPROF_OPTIONS_DEF_LEN_MAX] = { 0x00 }; | |||
| char env_profiling_options[MSPROF_OPTIONS_DEF_LEN_MAX] = {0x00}; | |||
| bool is_profiling_valid = false; | |||
| std::string profiling_options; | |||
| if (ge::GetContext().GetOption(OPTION_EXEC_PROFILING_OPTIONS, profiling_options) == SUCCESS && | |||
| @@ -1055,4 +1053,40 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::GetFpBpP | |||
| return; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::GetDeviceIdFromGraph( | |||
| uint32_t graph_id, uint32_t &device_id) { | |||
| auto iter = device_id_map_.find(graph_id); | |||
| if (iter != device_id_map_.end()) { | |||
| device_id = iter->second; | |||
| return SUCCESS; | |||
| } | |||
| REPORT_CALL_ERROR("E19999", "graph_id:%u does not exist!", graph_id); | |||
| GELOGE(PARAM_INVALID, "[Check][GraphId]graph_id:%u does not exist!", graph_id); | |||
| return FAILED; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::SetSubscribeInfo( | |||
| uint64_t prof_switch, uint32_t model_id, bool is_subscribe) { | |||
| subscribe_info_.is_subscribe = is_subscribe; | |||
| subscribe_info_.prof_switch = prof_switch; | |||
| subscribe_info_.graph_id = model_id; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::CleanSubscribeInfo() { | |||
| subscribe_info_.is_subscribe = false; | |||
| subscribe_info_.prof_switch = 0; | |||
| subscribe_info_.graph_id = 0; | |||
| } | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::GetModelIdFromGraph( | |||
| uint32_t graph_id, uint32_t &model_id) { | |||
| auto iter = model_id_map_.find(graph_id); | |||
| if (iter != model_id_map_.end()) { | |||
| model_id = iter->second; | |||
| return SUCCESS; | |||
| } | |||
| REPORT_CALL_ERROR("E19999", "graph_id:%u does not exist!", graph_id); | |||
| GELOGE(PARAM_INVALID, "[Check][GraphId]graph_id:%u does not exist!", graph_id); | |||
| return FAILED; | |||
| } | |||
| } // namespace ge | |||
| @@ -62,12 +62,18 @@ struct DeviceSubsInfo { | |||
| uint32_t subscribe_count; | |||
| }; | |||
| struct ProfSubscribeInfo { | |||
| bool is_subscribe; | |||
| uint64_t prof_switch; | |||
| uint32_t graph_id; | |||
| }; | |||
| struct MsprofCallback { | |||
| MsprofCtrlCallback msprofCtrlCallback; | |||
| MsprofReporterCallback msprofReporterCallback; | |||
| }; | |||
| class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager { | |||
| class ProfilingManager { | |||
| public: | |||
| ProfilingManager(); | |||
| virtual ~ProfilingManager(); | |||
| @@ -101,6 +107,16 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager { | |||
| void GetOpInputOutputInfo(const OpDescPtr &op, TaskDescInfo &task_desc_info) const; | |||
| void ReportData(const int32_t &device_id, const std::string &data, const std::string &tag_name); | |||
| Status ProfileStepInfo(uint64_t index_id, uint64_t model_id, uint16_t tag_id, rtStream_t stream, int32_t device_id); | |||
| void SetStepInfoIndex(uint64_t index_id) { index_id_ = index_id; } | |||
| uint64_t GetStepInfoIndex() const { return index_id_; } | |||
| void SetGraphIdToDeviceMap(uint32_t graph_id, uint32_t device_id) { device_id_map_[graph_id] = device_id; } | |||
| Status GetDeviceIdFromGraph(uint32_t graph_id, uint32_t &device_id); | |||
| void SetSubscribeInfo(uint64_t prof_switch, uint32_t model_id, bool is_subscribe); | |||
| const ProfSubscribeInfo &GetSubscribeInfo() const { return subscribe_info_; } | |||
| void CleanSubscribeInfo(); | |||
| void SetGraphIdToModelMap(uint32_t graph_id, uint32_t model_id) { model_id_map_[graph_id] = model_id; } | |||
| Status GetModelIdFromGraph(uint32_t graph_id, uint32_t &model_id); | |||
| private: | |||
| Status InitFromOptions(const Options &options, MsprofGeOptions &prof_conf); | |||
| Status ParseOptions(const std::string &options); | |||
| @@ -127,6 +143,10 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager { | |||
| std::string fp_point_; | |||
| std::string bp_point_; | |||
| uint32_t reporter_max_len_ = 0; | |||
| uint64_t index_id_; | |||
| std::map<uint32_t, uint32_t> device_id_map_; // key: graph_id, value: device_id | |||
| std::map<uint32_t, uint32_t> model_id_map_; // key: graph_id, value: model_id | |||
| ProfSubscribeInfo subscribe_info_; | |||
| }; | |||
| } // namespace ge | |||
| #endif // GE_COMMON_PROFILING_PROFILING_MANAGER_H_ | |||
| @@ -21,7 +21,7 @@ | |||
| #include <fstream> | |||
| #include "common/ge/ge_util.h" | |||
| #include "common/util.h" | |||
| #include "framework/common/util.h" | |||
| #include "framework/common/debug/ge_log.h" | |||
| #include "framework/common/debug/log.h" | |||
| #include "framework/common/ge_types.h" | |||
| @@ -35,13 +35,13 @@ PropertiesManager::PropertiesManager() : is_inited_(false), delimiter("=") {} | |||
| PropertiesManager::~PropertiesManager() {} | |||
| // singleton | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY PropertiesManager &PropertiesManager::Instance() { | |||
| PropertiesManager &PropertiesManager::Instance() { | |||
| static PropertiesManager instance; | |||
| return instance; | |||
| } | |||
| // Initialize property configuration | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool PropertiesManager::Init(const std::string &file_path) { | |||
| bool PropertiesManager::Init(const std::string &file_path) { | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| if (is_inited_) { | |||
| GELOGW("Already inited, will be initialized again"); | |||
| @@ -139,8 +139,7 @@ std::string PropertiesManager::Trim(const std::string &str) { | |||
| } | |||
| // Get property value, if not found, return "" | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string PropertiesManager::GetPropertyValue( | |||
| const std::string &map_key) { | |||
| std::string PropertiesManager::GetPropertyValue(const std::string &map_key) { | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| auto iter = properties_map_.find(map_key); | |||
| if (properties_map_.end() != iter) { | |||
| @@ -151,21 +150,19 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string PropertiesManager:: | |||
| } | |||
| // Set property value | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void PropertiesManager::SetPropertyValue(const std::string &map_key, | |||
| const std::string &value) { | |||
| void PropertiesManager::SetPropertyValue(const std::string &map_key, const std::string &value) { | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| properties_map_[map_key] = value; | |||
| } | |||
| // return properties_map_ | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::map<std::string, std::string> | |||
| PropertiesManager::GetPropertyMap() { | |||
| std::map<std::string, std::string> PropertiesManager::GetPropertyMap() { | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| return properties_map_; | |||
| } | |||
| // Set separator | |||
| FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void PropertiesManager::SetPropertyDelimiter(const std::string &de) { | |||
| void PropertiesManager::SetPropertyDelimiter(const std::string &de) { | |||
| std::lock_guard<std::mutex> lock(mutex_); | |||
| delimiter = de; | |||
| } | |||
| @@ -25,7 +25,7 @@ | |||
| #include "common/dump/dump_properties.h" | |||
| #include "graph/op_desc.h" | |||
| #include "common/ge_compiler_options.h" | |||
| #include "framework/common/ge_compiler_options.h" | |||
| namespace ge { | |||
| // Configuration property management | |||
| @@ -1,193 +0,0 @@ | |||
| syntax = "proto3"; | |||
| package ge.proto; | |||
| enum DataType | |||
| { | |||
| DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set. | |||
| DT_FLOAT = 1; // float type | |||
| DT_FLOAT16 = 2; // fp16 type | |||
| DT_INT8 = 3; // int8 type | |||
| DT_UINT8 = 4; // uint8 type | |||
| DT_INT16 = 5; // int16 type | |||
| DT_UINT16 = 6; // uint16 type | |||
| DT_INT32 = 7; // int32 type | |||
| DT_INT64 = 8; // int64 type | |||
| DT_UINT32 = 9; // unsigned int32 | |||
| DT_UINT64 = 10; // unsigned int64 | |||
| DT_BOOL = 11; // bool type | |||
| DT_DOUBLE = 12; // double type | |||
| DT_STRING = 13; // string type | |||
| DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */ | |||
| DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */ | |||
| DT_COMPLEX64 = 16; // complex64 type | |||
| DT_COMPLEX128 = 17; // complex128 type | |||
| DT_QINT8 = 18; // qint8 type | |||
| DT_QINT16 = 19; // qint16 type | |||
| DT_QINT32 = 20; // qint32 type | |||
| DT_QUINT8 = 21; // quint8 type | |||
| DT_QUINT16 = 22; // quint16 type | |||
| DT_RESOURCE = 23; // resource type | |||
| DT_STRING_REF = 24; // string_ref type | |||
| DT_DUAL = 25; /**< dual output type */ | |||
| DT_VARIANT = 26; // variant type | |||
| DT_BF16 = 27; // bf16 type | |||
| DT_INT4 = 28; // int4 type | |||
| } | |||
| message AttrDef | |||
| { | |||
| message ListValue | |||
| { | |||
| enum ListValueType{ | |||
| VT_LIST_NONE = 0; | |||
| VT_LIST_STRING = 1; | |||
| VT_LIST_INT = 2; | |||
| VT_LIST_FLOAT = 3; | |||
| VT_LIST_BOOL = 4; | |||
| VT_LIST_BYTES = 5; | |||
| VT_LIST_TENSOR_DESC = 6; | |||
| VT_LIST_TENSOR = 7; | |||
| VT_LIST_GRAPH = 8; | |||
| VT_LIST_NAMED_ATTRS = 9; | |||
| VT_LIST_DATA_TYPE = 10; | |||
| } | |||
| repeated bytes s = 2; // "list(string)" | |||
| repeated int64 i = 3; // "list(int)" | |||
| repeated float f = 4; // "list(float)" | |||
| repeated bool b = 5; // "list(bool)" | |||
| repeated bytes bt = 7; | |||
| repeated TensorDescriptor td = 8; | |||
| repeated TensorDef t = 9; | |||
| repeated GraphDef g = 10; | |||
| repeated NamedAttrs na = 11; | |||
| repeated int64 dt = 12; // list ge::DataType | |||
| ListValueType val_type = 20; | |||
| } | |||
| message ListListInt{ | |||
| message ListInt{ | |||
| repeated int64 list_i = 1; // list int | |||
| } | |||
| repeated ListInt list_list_i = 1; // list list int | |||
| } | |||
| oneof value | |||
| { | |||
| bytes s = 2; // "string" | |||
| int64 i = 3; // "int" | |||
| float f = 4; // "float" | |||
| bool b = 5; // "bool" | |||
| bytes bt = 7; | |||
| ListValue list = 1; // any "list(...)" | |||
| NamedAttrs func = 10; // Used to support attr nesting | |||
| TensorDescriptor td = 11; // GeTensorDesc type | |||
| TensorDef t = 12; // GeTensor type | |||
| GraphDef g = 13; // Graph type | |||
| ListListInt list_list_int = 14; // List List Int type | |||
| int64 dt = 15; // ge::DataType | |||
| } | |||
| } | |||
| // A list of attr names and their values. The whole list is attached | |||
| // with a string name. E.g., MatMul[T=float]. | |||
| message NamedAttrs | |||
| { | |||
| string name = 1; | |||
| map<string, AttrDef> attr = 2; | |||
| } | |||
| // Shape / dimension description, using row-major order | |||
| message ShapeDef | |||
| { | |||
| repeated int64 dim = 1; // Size of each dimension | |||
| } | |||
| // Multidimensional data description | |||
| message TensorDescriptor | |||
| { | |||
| string name = 1; // Optional parameter, tensor name | |||
| DataType dtype = 2; // tensor datatype | |||
| ShapeDef shape = 3; // Shape / dimension | |||
| string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND" | |||
| bool has_out_attr = 9; | |||
| int64 size = 10; | |||
| int64 weight_size = 11; | |||
| bool reuse_input = 12; | |||
| bool output_tensor = 13; | |||
| string device_type = 14; | |||
| bool input_tensor =15; | |||
| int64 real_dim_cnt = 16; | |||
| int64 reuse_input_index = 17; | |||
| int64 data_offset = 18; | |||
| int64 cmps_size = 19; | |||
| string cmps_tab = 20; | |||
| int64 cmps_tab_offset = 21; | |||
| map<string, AttrDef> attr = 5; // Set of extra parameter fields | |||
| } | |||
| // GeTensor definition | |||
| message TensorDef | |||
| { | |||
| TensorDescriptor desc = 1; // Tensor description | |||
| bytes data = 2; // Tensor data | |||
| } | |||
| // Operator description | |||
| message OpDef | |||
| { | |||
| string name = 1; // name | |||
| string type = 2; // type | |||
| repeated string input = 5; // input original op name + outgoing index. op_name:index | |||
| map<string, AttrDef> attr = 10; // Set of operator parameter fields | |||
| bool has_out_attr = 20; | |||
| int64 id = 21; | |||
| int64 stream_id =22; | |||
| repeated string input_name = 23; | |||
| repeated string src_name = 24; | |||
| repeated int64 src_index = 25; | |||
| repeated string dst_name = 26; | |||
| repeated int64 dst_index = 27; | |||
| repeated int64 input_i = 28; | |||
| repeated int64 output_i = 29; | |||
| repeated int64 workspace = 30; | |||
| repeated int64 workspace_bytes = 31; | |||
| repeated bool is_input_const = 32; | |||
| repeated TensorDescriptor input_desc = 33; | |||
| repeated TensorDescriptor output_desc = 34; | |||
| repeated string subgraph_name = 35; | |||
| } | |||
| // Graph definition | |||
| message GraphDef | |||
| { | |||
| string name = 1; // name | |||
| repeated string input = 4; // Graph input | |||
| repeated string output = 5; // Graph output | |||
| repeated OpDef op = 6; // List of operators | |||
| map<string, AttrDef> attr = 11; // Extended field | |||
| } | |||
| // model definition | |||
| message ModelDef | |||
| { | |||
| string name = 1; // name | |||
| uint32 version = 2; // IR Proto version | |||
| string custom_version = 3; // User model version number, passed in by user | |||
| repeated GraphDef graph = 7; // Graph definition; graph[0] represents the main graph of the model | |||
| map<string, AttrDef> attr = 11; // Extended field | |||
| } | |||
| @@ -1,140 +0,0 @@ | |||
| syntax = "proto3"; | |||
| package domi; | |||
| message InsertNewOps { | |||
| repeated AippOpParams aipp_op = 1; | |||
| repeated MultiShapeOpParams multi_shape_op = 2; | |||
| } | |||
| message AippOpParams { | |||
| enum InputFormat { | |||
| UNDEFINED = 0; | |||
| YUV420SP_U8 = 1; | |||
| XRGB8888_U8 = 2; | |||
| RGB888_U8 = 3; | |||
| YUV400_U8 = 4; | |||
| NC1HWC0DI_FP16 = 5; | |||
| NC1HWC0DI_S8 = 6; | |||
| ARGB8888_U8 = 7; | |||
| YUYV_U8 = 8; | |||
| YUV422SP_U8 = 9; | |||
| AYUV444_U8 = 10; | |||
| RAW10 = 11; | |||
| RAW12 = 12; | |||
| RAW16 = 13; | |||
| RAW24 = 14; | |||
| RGB16 = 15; | |||
| RGB20 = 16; | |||
| RGB24 = 17; | |||
| RGB8_IR = 18; | |||
| RGB16_IR = 19; | |||
| RGB24_IR = 20; | |||
| } | |||
| enum AippMode { | |||
| undefined = 0; | |||
| static = 1; | |||
| dynamic = 2; | |||
| } | |||
| // AIPP mode, distinguishing static AIPP from dynamic AIPP | |||
| AippMode aipp_mode = 1; | |||
| // The related_input_rank parameter is required; it is an integer with a valid range of >= 0 and <= the number of input Data operators; the default value is 0. | |||
| // Identifies which model input AIPP processing is applied to. For example, if the model has two inputs and AIPP should be applied to the second one, set related_input_rank to 1. | |||
| uint32 related_input_rank = 2; | |||
| // related_input_name is optional and the top name of data node which inserts aipp | |||
| string related_input_name = 6; | |||
| // The input_edge_idx parameter is optional; it is an integer with a valid range of >= 0. | |||
| // It allows different AIPP processing to be applied to different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank. | |||
| // The configured value must be <= the number of output edges of the Data operator. | |||
| repeated uint32 input_edge_idx = 3; | |||
| // [Begin] Dynamic AIPP parameters; not effective when static AIPP is configured | |||
| uint32 max_src_image_size = 4; | |||
| // Whether rotation is supported. Disabled by default; enabling rotation incurs extra memory and performance overhead. | |||
| bool support_rotation = 5; | |||
| // [End] Dynamic AIPP parameters | |||
| // [Begin] Static AIPP parameters; not effective when dynamic AIPP is configured | |||
| InputFormat input_format = 51; | |||
| bool csc_switch = 52; | |||
| float cpadding_value = 53; | |||
| bool rbuv_swap_switch = 54; | |||
| bool ax_swap_switch = 55; | |||
| bool single_line_mode = 56; | |||
| int32 src_image_size_w = 57; | |||
| int32 src_image_size_h = 58; | |||
| bool crop = 59; | |||
| int32 load_start_pos_w = 60; | |||
| int32 load_start_pos_h = 61; | |||
| int32 crop_size_w = 62; | |||
| int32 crop_size_h = 63; | |||
| bool resize = 64; | |||
| int32 resize_output_w = 65; | |||
| int32 resize_output_h = 66; | |||
| bool padding = 67; | |||
| int32 left_padding_size = 68; | |||
| int32 right_padding_size = 69; | |||
| int32 top_padding_size = 70; | |||
| int32 bottom_padding_size = 71; | |||
| float padding_value = 72; | |||
| int32 mean_chn_0 = 10; | |||
| int32 mean_chn_1 = 11; | |||
| int32 mean_chn_2 = 12; | |||
| int32 mean_chn_3 = 19; | |||
| float min_chn_0 = 13; | |||
| float min_chn_1 = 14; | |||
| float min_chn_2 = 15; | |||
| float min_chn_3 = 20; | |||
| repeated float var_reci_chn_0 = 16; | |||
| repeated float var_reci_chn_1 = 17; | |||
| repeated float var_reci_chn_2 = 18; | |||
| repeated float var_reci_chn_3 = 21; | |||
| repeated int32 matrix_r0c0 = 30; | |||
| repeated int32 matrix_r0c1 = 31; | |||
| repeated int32 matrix_r0c2 = 32; | |||
| repeated int32 matrix_r1c0 = 33; | |||
| repeated int32 matrix_r1c1 = 34; | |||
| repeated int32 matrix_r1c2 = 35; | |||
| repeated int32 matrix_r2c0 = 36; | |||
| repeated int32 matrix_r2c1 = 37; | |||
| repeated int32 matrix_r2c2 = 38; | |||
| repeated int32 output_bias_0 = 39; | |||
| repeated int32 output_bias_1 = 40; | |||
| repeated int32 output_bias_2 = 41; | |||
| repeated int32 input_bias_0 = 42; | |||
| repeated int32 input_bias_1 = 43; | |||
| repeated int32 input_bias_2 = 44; | |||
| // [End] Static AIPP parameters | |||
| // The n number that is used for raw/rgbir data into f16 transformation. | |||
| // The transformation equation is x/(2^n). If set to 0, no transform is performed. | |||
| uint32 raw_rgbir_to_f16_n = 45; | |||
| } | |||
| message MultiShapeOpParams { | |||
| enum MultiShapeMode { | |||
| batch = 0; // dynamic batch | |||
| resolution = 1; // dynamic resolution, reserved for extension | |||
| } | |||
| MultiShapeMode mode = 1; // operator mode | |||
| uint32 related_input_rank = 2; // which model input the new operator is inserted at | |||
| repeated uint32 batch_list = 11; // batch_list values; the number of batch_list entries is between 2 and 8 | |||
| } | |||
| @@ -1,396 +0,0 @@ | |||
| /* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. | |||
| * | |||
| * This program is free software; you can redistribute it and/or modify | |||
| * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. | |||
| * | |||
| * This program is distributed in the hope that it will be useful, | |||
| * but WITHOUT ANY WARRANTY; without even the implied warranty of | |||
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |||
| * Apache License for more details at | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi; | |||
| enum TargetType | |||
| { | |||
| MINI = 0; | |||
| TINY = 1; | |||
| LITE = 2; | |||
| } | |||
| // offline model | |||
| message ModelDef { | |||
| string name = 1; | |||
| uint32 version = 2; | |||
| uint64 memory_size = 10; | |||
| uint32 stream_num = 11; | |||
| uint32 event_num = 12; | |||
| uint64 weight_size = 13; | |||
| uint32 label_num = 15; | |||
| repeated OpDef op = 20; | |||
| TargetType target_type = 23; | |||
| map<string, AttrDef> attr = 30; | |||
| }; | |||
| // operator define | |||
| message OpDef { | |||
| string name = 1; | |||
| string type = 2; | |||
| uint32 id = 3; | |||
| uint32 stream_id = 4; | |||
| repeated string input_name = 5; | |||
| repeated string src_name = 8; | |||
| repeated int32 src_index = 9; | |||
| repeated int64 input = 10; | |||
| repeated int64 output = 11; | |||
| repeated TensorDescriptor input_desc = 12; | |||
| repeated TensorDescriptor output_desc = 13; | |||
| repeated WeightDef weights = 14; | |||
| repeated string dst_name = 15; | |||
| repeated int32 dst_index = 16; | |||
| repeated int64 workspace = 20; | |||
| repeated uint32 workspace_bytes = 21; | |||
| repeated string weight_name = 22; | |||
| repeated bool is_input_const = 23; | |||
| map<string, AttrDef> attr = 30; | |||
| QuantizeFactorParams quantize_factor = 31; | |||
| oneof op_params { | |||
| // start at 100 here | |||
| SendOpParams sender_param = 100; | |||
| RecvOpParams receiver_param = 200; | |||
| ConvolutionOpParams convolution_param = 300; | |||
| PoolingOpParams pooling_param = 400; | |||
| EltwiseOpParams eltwise_param = 500; | |||
| BatchNormOpParams batchnorm_param = 600; | |||
| ScaleOpParams scale_param = 700; | |||
| FullConnectionOpParams full_connection_param = 800; | |||
| SoftmaxOpParams softmax_param = 900; | |||
| ActivationOpParams activation_param = 1000; | |||
| ReshapeOpParams reshape_param = 1100; | |||
| } | |||
| }; | |||
| message SendOpParams { | |||
| uint32 event_id = 1; | |||
| }; | |||
| message RecvOpParams { | |||
| uint32 event_id = 1; | |||
| }; | |||
| enum QuantizeScaleType | |||
| { | |||
| VECTOR_SCALE = 0; | |||
| SCALAR_SCALE = 1; | |||
| } | |||
| enum QuantizeScaleMode | |||
| { | |||
| NORMAL_MODE = 0; | |||
| SQRT_MODE = 1; | |||
| } | |||
| enum QuantizeAlgorithm | |||
| { | |||
| NON_OFFSET_ALGO = 0; | |||
| HALF_OFFSET_ALGO = 1; | |||
| ALL_OFFSET_ALGO = 2; | |||
| } | |||
| message QuantizeFactor | |||
| { | |||
| QuantizeScaleMode scale_mode = 1; | |||
| bytes scale_value = 2; | |||
| int64 scale_offset = 3; | |||
| bytes offset_data_value = 4; | |||
| int64 offset_data_offset = 5; | |||
| bytes offset_weight_value = 6; | |||
| int64 offset_weight_offset = 7; | |||
| bytes offset_pad_value = 8; | |||
| int64 offset_pad_offset = 9; | |||
| }; | |||
| message QuantizeCalcFactor | |||
| { | |||
| bytes offsetw = 1; | |||
| int64 offsetw_offset = 2; | |||
| bytes offsetd = 3; | |||
| int64 offsetd_offset = 4; | |||
| bytes scalereq = 5; | |||
| int64 scaledreq_offset = 6; | |||
| bytes offsetdnext = 7; | |||
| int64 offsetdnext_offset = 8; | |||
| } | |||
| message QuantizeFactorParams | |||
| { | |||
| QuantizeAlgorithm quantize_algo = 1; | |||
| QuantizeScaleType scale_type = 2; | |||
| QuantizeFactor quantize_param = 3; | |||
| QuantizeFactor dequantize_param = 4; | |||
| QuantizeFactor requantize_param = 5; | |||
| QuantizeCalcFactor quantizecalc_param = 6; | |||
| }; | |||
| message ConvolutionOpParams { | |||
| int32 mode = 1; | |||
| int32 algo = 2; | |||
| int32 pad_mode = 3; | |||
| uint32 group = 4; | |||
| uint32 num_output = 5; | |||
| repeated uint32 pad = 10; | |||
| repeated uint32 stride = 11; | |||
| repeated uint32 dilation = 12; | |||
| repeated uint32 kernel = 13; | |||
| float alpha = 20; | |||
| float beta = 21; | |||
| WeightDef filter = 40; | |||
| WeightDef bias = 41; | |||
| bool relu_flag = 62; | |||
| repeated uint32 adj = 70; | |||
| repeated uint32 target_shape = 71; | |||
| repeated uint32 before_pad = 72; | |||
| }; | |||
| message PoolingOpParams { | |||
| int32 mode = 1; | |||
| int32 nan_opt = 2; | |||
| int32 pad_mode = 3; | |||
| bool global_pooling = 4; | |||
| repeated uint32 window = 10; | |||
| repeated uint32 pad = 11; | |||
| repeated uint32 stride = 12; | |||
| bool ceil_mode = 13; | |||
| int32 data_mode = 14; | |||
| float alpha = 20; | |||
| float beta = 21; | |||
| repeated uint32 before_pad = 22; | |||
| }; | |||
| message EltwiseOpParams { | |||
| int32 mode = 1; | |||
| repeated float coeff = 2; | |||
| float alpha = 3; | |||
| float beta = 4; | |||
| repeated WeightDef weight = 5; | |||
| bool relu_flag = 6; | |||
| }; | |||
| message ActivationOpParams { | |||
| int32 mode = 1; | |||
| float coef = 2; | |||
| float alpha = 3; | |||
| float beta = 4; | |||
| }; | |||
| message BatchNormOpParams { | |||
| int32 mode = 1; | |||
| float alpha = 2; | |||
| float beta = 3; | |||
| double epsilon = 4; // optional, [default = 1e-5] | |||
| bool use_global_stats = 5; // optional, true by default, testing mode | |||
| float moving_average_fraction = 6; // optional, [default = .999] | |||
| WeightDef estimated_mean = 7; | |||
| WeightDef estimated_variance = 8; | |||
| WeightDef scale = 9; | |||
| WeightDef bias = 10; | |||
| }; | |||
| message ScaleOpParams { | |||
| WeightDef scale = 1; | |||
| WeightDef bias = 2; | |||
| }; | |||
| message ReshapeOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| ShapeDef shape = 3; | |||
| int32 axis = 4; | |||
| int32 num_axes = 5; | |||
| int32 format = 6; | |||
| }; | |||
| message SoftmaxOpParams { | |||
| int32 algo = 1; | |||
| int32 mode = 2; | |||
| float alpha = 3; | |||
| float beta = 4; | |||
| }; | |||
| message FullConnectionOpParams { | |||
| WeightDef filter = 1; | |||
| WeightDef bias = 2; | |||
| uint32 num_output = 3; | |||
| bool relu_flag = 12; | |||
| }; | |||
| message FlattenOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| int32 start_axis = 3; | |||
| int32 end_axis = 4; | |||
| } | |||
| message AddLimitedOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| int32 axis = 3; | |||
| bool broadcast = 4; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message MulLimitedOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| int32 axis = 3; | |||
| bool broadcast = 4; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message AddOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message MulOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message SubOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| repeated WeightDef weight = 10; | |||
| }; | |||
| message BiasAddOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| WeightDef bias = 10; | |||
| }; | |||
| message MatMulOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| bool transposeX = 3; | |||
| bool transposeW = 4; | |||
| WeightDef filter = 10; | |||
| WeightDef bias = 12; | |||
| }; | |||
| message RsqrtOpParams { | |||
| float alpha = 1; | |||
| float beta = 2; | |||
| }; | |||
| message WeightDef { | |||
| int32 format = 1; | |||
| int32 data_type = 2; | |||
| ShapeDef shape = 3; | |||
| bytes data = 4; | |||
| int64 data_offset = 5; | |||
| uint32 cmps_size = 6; | |||
| bytes cmps_tab = 7; | |||
| int64 cmps_tab_offset = 10; | |||
| CompressInfo cmps_info = 8; | |||
| AllOffsetQuantizeInfo alloffset_quantize_info = 11; | |||
| } | |||
| message ShapeDef { | |||
| repeated int64 dim = 1; | |||
| } | |||
| enum DeviceType { | |||
| NPU = 0; // In default, we will use NPU. | |||
| CPU = 1; // CPU | |||
| } | |||
| message AllOffsetQuantizeInfo { | |||
| float scale = 1; | |||
| int32 offset = 2; | |||
| } | |||
| message TensorDescriptor { | |||
| int32 format = 1; | |||
| int32 data_type = 2; | |||
| repeated int64 dim = 3; | |||
| uint32 size = 4; | |||
| bool reuse_input = 5; | |||
| bool output_tensor = 7; | |||
| DeviceType device_type = 8; | |||
| bool input_tensor = 9; | |||
| uint32 real_dim_cnt = 10; | |||
| uint32 reuse_input_index = 11; | |||
| AllOffsetQuantizeInfo alloffset_quantize_info = 12; | |||
| } | |||
| message CompressInfo { | |||
| int32 blockRow = 1; // block row | |||
| int32 blockCol = 2; // block col | |||
| int32 fractalK = 3; // fractal K | |||
| int32 fractalN = 4; // fractal N | |||
| int32 lastFractalK = 5; // K of last fractal | |||
| int32 lastFractalN = 6; // N of last fractal | |||
| int32 cubeSize = 7; // cube's length | |||
| int32 loadDir = 8; // data load direction: 0 = column load, 1 = row load | |||
| } | |||
| message AttrDef { | |||
| message ListValue { | |||
| repeated string s = 2; // "list(string)" | |||
| repeated int64 i = 3 [packed = true]; // "list(int)" | |||
| repeated float f = 4 [packed = true]; // "list(float)" | |||
| repeated bool b = 5 [packed = true]; // "list(bool)" | |||
| repeated uint32 u = 6 [packed = true]; // "list(uint)" | |||
| repeated bytes bt = 7; | |||
| } | |||
| oneof value { | |||
| string s = 2; // "string" | |||
| int64 i = 3; // "int" | |||
| float f = 4; // "float" | |||
| bool b = 5; // "bool" | |||
| uint32 u = 6; // "uint32" | |||
| bytes bt = 7; | |||
| ListValue list = 1; // any "list(...)" | |||
| NamedAttrs func = 10; | |||
| } | |||
| } | |||
| // A list of attr names and their values. The whole list is attached | |||
| // with a string name. E.g., MatMul[T=float]. | |||
| message NamedAttrs { | |||
| string name = 1; | |||
| map<string, AttrDef> attr = 2; | |||
| } | |||
| @@ -1,75 +0,0 @@ | |||
| syntax = "proto3"; | |||
| package toolkit.aicpu.dump; | |||
| message Shape { | |||
| repeated uint64 dim = 1; | |||
| } | |||
| message Output { | |||
| int32 data_type = 1; | |||
| int32 format = 2; | |||
| Shape shape = 3; | |||
| uint64 address = 4; | |||
| string original_name = 5; | |||
| int32 original_output_index = 6; | |||
| int32 original_output_data_type = 7; | |||
| int32 original_output_format = 8; | |||
| uint64 size = 9; | |||
| Shape origin_shape = 10; | |||
| } | |||
| message Input { | |||
| int32 data_type =1; | |||
| int32 format = 2; | |||
| Shape shape = 3; | |||
| uint64 address = 4; | |||
| uint64 size = 5; | |||
| Shape origin_shape = 6; | |||
| } | |||
| enum BufferType { | |||
| L1 = 0; | |||
| } | |||
| message OpBuffer { | |||
| BufferType buffer_type = 1; | |||
| uint64 address = 2; | |||
| uint64 size = 3; | |||
| } | |||
| message Op { | |||
| string op_name = 1; | |||
| string op_type = 2; | |||
| } | |||
| message Task { | |||
| uint32 task_id = 1; | |||
| uint32 stream_id = 2; | |||
| Op op = 3; | |||
| repeated Output output = 4; | |||
| bool end_graph = 5; | |||
| repeated Input input = 6; | |||
| repeated OpBuffer buffer = 7; | |||
| } | |||
| message OpMappingInfo { | |||
| string dump_path = 1; | |||
| oneof model_name_param { | |||
| string model_name = 2; | |||
| } | |||
| oneof model_id_param { | |||
| uint32 model_id = 3; | |||
| } | |||
| oneof step_id { | |||
| uint64 step_id_addr = 4; | |||
| } | |||
| oneof iterations_per_loop { | |||
| uint64 iterations_per_loop_addr = 5; | |||
| } | |||
| oneof loop_cond { | |||
| uint64 loop_cond_addr = 6; | |||
| } | |||
| uint32 flag = 7; // 0x01 load, 0x00 unload | |||
| repeated Task task = 8; | |||
| string dump_step = 9; | |||
| } | |||
| @@ -1,179 +0,0 @@ | |||
| /* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. | |||
| * | |||
| * This program is free software; you can redistribute it and/or modify | |||
| * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. | |||
| * | |||
| * This program is distributed in the hope that it will be useful, | |||
| * but WITHOUT ANY WARRANTY; without even the implied warranty of | |||
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |||
| * Apache License for more details at | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi; | |||
| message ModelTaskDef { | |||
| string version = 1; | |||
| map<string, string> attr = 9; // Extended field | |||
| repeated TaskDef task = 10; | |||
| uint64 memory_size = 11; | |||
| uint32 stream_num = 12; | |||
| uint32 event_num = 13; | |||
| uint64 weight_size = 14; | |||
| repeated bytes op = 15; // input/output opdef in bytes | |||
| uint64 base_addr = 16; // base addr | |||
| uint64 weight_addr = 17; // weight addr | |||
| uint32 batch_num = 18; | |||
| } | |||
| message TaskDef { | |||
| uint32 id = 1; | |||
| uint32 type = 2; | |||
| uint32 stream_id = 10; | |||
| uint32 event_id = 11; | |||
| KernelDef kernel = 20; | |||
| KernelExDef kernel_ex = 21; | |||
| KernelHcclDef kernel_hccl = 25; | |||
| EventExDef event_ex = 26; | |||
| LogTimeStampDef log_timestamp = 28; | |||
| uint32 label_id = 30; | |||
| MemcpyAsyncDef memcpy_async = 31; | |||
| StreamSwitchDef stream_switch = 32; | |||
| StreamActiveDef stream_active = 33; | |||
| bytes private_def = 34; | |||
| uint64 ops_kernel_store_ptr = 35; // adjustments to other fields in the future | |||
| StreamSwitchNDef stream_switch_n = 36; | |||
| LabelSetDef label_set = 37; | |||
| LabelGotoExDef label_goto_ex = 38; | |||
| LabelSwitchByIndexDef label_switch_by_index = 39; | |||
| KernelDefWithHandle kernel_with_handle = 40; | |||
| } | |||
| message KernelDef { | |||
| KernelContext context = 1; | |||
| string stub_func = 10; | |||
| uint32 block_dim = 11; | |||
| uint32 args_size = 12; | |||
| bytes args = 13; | |||
| bytes sm_desc = 14; | |||
| bytes flowtable = 15; | |||
| string so_name = 16; | |||
| string kernel_name = 17; | |||
| bytes kernel_ext_info = 18; | |||
| uint32 kernel_ext_info_size = 19; | |||
| } | |||
| message KernelDefWithHandle { | |||
| KernelContext context = 1; | |||
| uint64 handle = 10; | |||
| string dev_func = 11; | |||
| uint32 block_dim = 12; | |||
| uint32 args_size = 13; | |||
| bytes args = 14; | |||
| bytes sm_desc = 15; | |||
| string original_kernel_key = 16; | |||
| string node_info = 17; | |||
| } | |||
| message KernelContext { | |||
| uint32 kernel_type = 1; | |||
| uint32 op_id = 2; // OP type in CCE | |||
| uint32 kernel_func_id = 3; | |||
| uint32 op_index = 4; // TE/Custom operator | |||
| bool is_flowtable = 5; // Identify whether args is a flowtable structure | |||
| bytes args_offset = 6; // args offset information | |||
| uint32 args_count = 7; // args count | |||
| repeated uint32 origin_op_index = 8; | |||
| } | |||
| message KernelExDef { | |||
| uint32 flags = 1; | |||
| uint32 op_index = 4; | |||
| uint32 args_size = 12; | |||
| bytes args = 13; | |||
| bytes task_info = 14; // serialized nodeDef, funcDef, inputoutput | |||
| uint32 task_info_size = 15; | |||
| bytes kernel_ext_info = 16; | |||
| uint32 kernel_ext_info_size = 17; | |||
| } | |||
| message KernelHcclDef { | |||
| uint32 op_index = 8; | |||
| string hccl_type = 9; | |||
| } | |||
| message EventExDef { | |||
| uint32 op_index = 1; | |||
| uint32 event_type = 2; | |||
| } | |||
| message LogTimeStampDef { | |||
| uint64 logid = 1; | |||
| bool notify = 2; | |||
| uint32 flat = 3; | |||
| } | |||
| message MemcpyAsyncDef { | |||
| uint64 dst = 1; | |||
| uint64 dst_max = 2; | |||
| uint64 src = 3; | |||
| uint64 count = 4; | |||
| uint32 kind = 5; | |||
| uint32 op_index = 6; | |||
| } | |||
| message StreamSwitchDef { | |||
| uint32 op_index = 1; | |||
| uint32 true_stream_id = 2; | |||
| int64 value = 3; | |||
| uint64 value_ptr = 4; | |||
| uint32 data_type = 5; | |||
| } | |||
| message StreamActiveDef { | |||
| uint32 op_index = 1; | |||
| uint32 active_stream_id = 2; | |||
| } | |||
| message StreamSwitchNDef { | |||
| uint32 op_index = 1; | |||
| uint32 size = 2; | |||
| repeated int64 target_value = 3; | |||
| repeated uint32 true_stream_id = 4; | |||
| uint32 element_size = 5; | |||
| uint32 data_type = 6; | |||
| } | |||
| message LabelSetDef { | |||
| uint32 op_index = 1; | |||
| uint32 label_id = 2; | |||
| uint32 model_id = 3; | |||
| } | |||
| message LabelGotoExDef { | |||
| uint32 op_index = 1; | |||
| uint32 label_id = 2; | |||
| uint32 model_id = 3; | |||
| } | |||
| message LabelSwitchByIndexDef { | |||
| uint32 op_index = 1; | |||
| uint32 label_max = 2; | |||
| } | |||
| @@ -1,70 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "AttrValueProtos"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| import "tensor.proto"; | |||
| import "tensor_shape.proto"; | |||
| import "types.proto"; | |||
| // Protocol buffer representing the value for an attr used to configure an Op. | |||
| // Comment indicates the corresponding attr type. Only the field matching the | |||
| // attr type may be filled. | |||
| message AttrValue { | |||
| // LINT.IfChange | |||
| message ListValue { | |||
| repeated bytes s = 2; // "list(string)" | |||
| repeated int64 i = 3 [packed = true]; // "list(int)" | |||
| repeated float f = 4 [packed = true]; // "list(float)" | |||
| repeated bool b = 5 [packed = true]; // "list(bool)" | |||
| repeated DataType type = 6 [packed = true]; // "list(type)" | |||
| repeated TensorShapeProto shape = 7; // "list(shape)" | |||
| repeated TensorProto tensor = 8; // "list(tensor)" | |||
| repeated NameAttrList func = 9; // "list(attr)" | |||
| } | |||
| // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.cc) | |||
| oneof value { | |||
| bytes s = 2; // "string" | |||
| int64 i = 3; // "int" | |||
| float f = 4; // "float" | |||
| bool b = 5; // "bool" | |||
| DataType type = 6; // "type" | |||
| TensorShapeProto shape = 7; // "shape" | |||
| TensorProto tensor = 8; // "tensor" | |||
| ListValue list = 1; // any "list(...)" | |||
| // "func" represents a function. func.name is a function's name or | |||
| // a primitive op's name. func.attr.first is the name of an attr | |||
| // defined for that function. func.attr.second is the value for | |||
| // that attr in the instantiation. | |||
| NameAttrList func = 10; | |||
| // This is a placeholder only used in nodes defined inside a | |||
| // function. It indicates the attr value will be supplied when | |||
| // the function is instantiated. For example, let us suppose a | |||
| // node "N" in function "FN". "N" has an attr "A" with value | |||
| // placeholder = "foo". When FN is instantiated with attr "foo" | |||
| // set to "bar", the instantiated node N's attr A will have been | |||
| // given the value "bar". | |||
| string placeholder = 9; | |||
| } | |||
| } | |||
| // A list of attr names and their values. The whole list is attached | |||
| // with a string name. E.g., MatMul[T=float]. | |||
| message NameAttrList { | |||
| string name = 1; | |||
| map<string, AttrValue> attr = 2; | |||
| } | |||
| @@ -1,108 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "FunctionProtos"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| import "attr_value.proto"; | |||
| import "node_def.proto"; | |||
| import "op_def.proto"; | |||
| // A library is a set of named functions. | |||
| message FunctionDefLibrary { | |||
| repeated FunctionDef function = 1; | |||
| repeated GradientDef gradient = 2; | |||
| } | |||
| // A function can be instantiated when the runtime can bind every attr | |||
| // with a value. When a GraphDef has a call to a function, it must | |||
| // have binding for every attr defined in the signature. | |||
| // * device spec, etc. | |||
| message FunctionDef { | |||
| // The definition of the function's name, arguments, return values, | |||
| // attrs etc. | |||
| OpDef signature = 1; | |||
| // Attributes specific to this function definition. | |||
| map<string, AttrValue> attr = 5; | |||
| // NOTE: field id 2 deleted on Jan 11, 2017, GraphDef version 21. | |||
| reserved 2; | |||
| // In both of the following fields, there is the need to specify an | |||
| // output that is used as either the input to another node (in | |||
| // `node_def`) or as a return value of the function (in `ret`). | |||
| // Unlike the NodeDefs in GraphDef, we need to be able to specify a | |||
| // list in some cases (instead of just single outputs). Also, we | |||
| // need to be able to deal with lists of unknown length (so the | |||
| // output index may not be known at function definition time). So | |||
| // we use the following format instead: | |||
| // * "fun_in" where "fun_in" is the name of a function input arg in | |||
| // the `signature` field above. This represents that input, whether | |||
| // it is a single tensor or a list. | |||
| // * "fun_in:0" gives the first element of a function input arg (a | |||
| // non-list input is considered a list of length 1 for these | |||
| // purposes). | |||
| // * "node:out" where "node" is the name of a node in `node_def` and | |||
| // "out" is the name one of its op's output arguments (the name | |||
| // comes from the OpDef of the node's op). This represents that | |||
| // node's output, whether it is a single tensor or a list. | |||
| // Note: We enforce that an op's output arguments are never | |||
| // renamed in the backwards-compatibility test. | |||
| // * "node:out:0" gives the first element of a node output arg (a | |||
| // non-list output is considered a list of length 1 for these | |||
| // purposes). | |||
| // | |||
| // NOT CURRENTLY SUPPORTED (but may be in the future): | |||
| // * "node:out:-1" gives last element in a node output list | |||
| // * "node:out:1:" gives a list with all but the first element in a | |||
| // node output list | |||
| // * "node:out::-1" gives a list with all but the last element in a | |||
| // node output list | |||
| // The body of the function. Unlike the NodeDefs in a GraphDef, attrs | |||
| // may have values of type `placeholder` and the `input` field uses | |||
| // the "output" format above. | |||
| // By convention, "op" in node_def is resolved by consulting with a | |||
| // user-defined library first. If not resolved, "func" is assumed to | |||
| // be a builtin op. | |||
| repeated NodeDef node_def = 3; | |||
| // A mapping from the output arg names from `signature` to the | |||
| // outputs from `node_def` that should be returned by the function. | |||
| map<string, string> ret = 4; | |||
| } | |||
| // GradientDef defines the gradient function of a function defined in | |||
| // a function library. | |||
| // | |||
| // A gradient function g (specified by gradient_func) for a function f | |||
| // (specified by function_name) must follow the following: | |||
| // | |||
| // The function 'f' must be a numerical function which takes N inputs | |||
| // and produces M outputs. Its gradient function 'g', which is a | |||
| // function taking N + M inputs and produces N outputs. | |||
| // | |||
| // I.e. if we have | |||
| // (y1, y2, ..., y_M) = f(x1, x2, ..., x_N), | |||
| // then, g is | |||
| // (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N, | |||
| // dL/dy1, dL/dy2, ..., dL/dy_M), | |||
| // where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the | |||
| // loss function). dL/dx_i is the partial derivative of L with respect | |||
| // to x_i. | |||
| message GradientDef { | |||
| string function_name = 1; // The function name. | |||
| string gradient_func = 2; // The gradient function's name. | |||
| } | |||
| @@ -1,64 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "GraphProtos"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| import "node_def.proto"; | |||
| import "function.proto"; | |||
| import "versions.proto"; | |||
| // Represents the graph of operations | |||
| message GraphDef { | |||
| repeated NodeDef node = 1; | |||
| // Compatibility versions of the graph. See core/public/version.h for version | |||
| // history. The GraphDef version is distinct from the TensorFlow version, and | |||
| // each release of TensorFlow will support a range of GraphDef versions. | |||
| VersionDef versions = 4; | |||
| // Deprecated single version field; use versions above instead. Since all | |||
| // GraphDef changes before "versions" was introduced were forward | |||
| // compatible, this field is entirely ignored. | |||
| int32 version = 3 [deprecated = true]; | |||
| // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET. | |||
| // | |||
| // "library" provides user-defined functions. | |||
| // | |||
| // Naming: | |||
| // * library.function.name are in a flat namespace. | |||
| // NOTE: We may need to change it to be hierarchical to support | |||
| // different orgs. E.g., | |||
| // { "/google/nn", { ... }}, | |||
| // { "/google/vision", { ... }} | |||
| // { "/org_foo/module_bar", { ... }} | |||
| // map<string, FunctionDefLib> named_lib; | |||
| // * If node[i].op is the name of one function in "library", | |||
| // node[i] is deemed as a function call. Otherwise, node[i].op | |||
| // must be a primitive operation supported by the runtime. | |||
| // | |||
| // | |||
| // Function call semantics: | |||
| // | |||
| // * The callee may start execution as soon as some of its inputs | |||
| // are ready. The caller may want to use Tuple() mechanism to | |||
| // ensure all inputs are ready in the same time. | |||
| // | |||
| // * The consumer of return values may start executing as soon as | |||
| // the return values the consumer depends on are ready. The | |||
| // consumer may want to use Tuple() mechanism to ensure the | |||
| // consumer does not start until all return values of the callee | |||
| // function are ready. | |||
| FunctionDefLibrary library = 2; | |||
| }; | |||
| @@ -1,22 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| import "graph.proto"; | |||
| message GeGraphDef { | |||
| string name = 1; | |||
| GraphDef graph = 2; | |||
| } | |||
| message GraphDefLibrary { | |||
| repeated GeGraphDef graph_def = 1; | |||
| }; | |||
| @@ -1,71 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "NodeProto"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| import "attr_value.proto"; | |||
| message NodeDef { | |||
| // The name given to this operator. Used for naming inputs, | |||
| // logging, visualization, etc. Unique within a single GraphDef. | |||
| // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_./]*". | |||
| string name = 1; | |||
| // The operation name. There may be custom parameters in attrs. | |||
| // Op names starting with an underscore are reserved for internal use. | |||
| string op = 2; | |||
| // Each input is "node:src_output" with "node" being a string name and | |||
| // "src_output" indicating which output tensor to use from "node". If | |||
| // "src_output" is 0 the ":0" suffix can be omitted. Regular inputs | |||
| // may optionally be followed by control inputs that have the format | |||
| // "^node". | |||
| repeated string input = 3; | |||
| // A (possibly partial) specification for the device on which this | |||
| // node should be placed. | |||
| // The expected syntax for this string is as follows: | |||
| // | |||
| // DEVICE_SPEC ::= PARTIAL_SPEC | |||
| // | |||
| // PARTIAL_SPEC ::= ("/" CONSTRAINT) * | |||
| // CONSTRAINT ::= ("job:" JOB_NAME) | |||
| // | ("replica:" [1-9][0-9]*) | |||
| // | ("task:" [1-9][0-9]*) | |||
| // | ("device:" [A-Za-z]* ":" ([1-9][0-9]* | "*") ) | |||
| // | |||
| // Valid values for this string include: | |||
| // * "/job:worker/replica:0/task:1/device:GPU:3" (full specification) | |||
| // * "/job:worker/device:GPU:3" (partial specification) | |||
| // * "" (no specification) | |||
| // | |||
| // If the constraints do not resolve to a single device (or if this | |||
| // field is empty or not present), the runtime will attempt to | |||
| // choose a device automatically. | |||
| string device = 4; | |||
| // Operation-specific graph-construction-time configuration. | |||
| // Note that this should include all attrs defined in the | |||
| // corresponding OpDef, including those with a value matching | |||
| // the default -- this allows the default to change and makes | |||
| // NodeDefs easier to interpret on their own. However, if | |||
| // an attr with a default is not specified in this list, the | |||
| // default will be used. | |||
| // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and | |||
| // one of the names from the corresponding OpDef's attr field). | |||
| // The values must have a type matching the corresponding OpDef | |||
| // attr's type field. | |||
| // Add some examples here showing best practices. | |||
| map<string, AttrValue> attr = 5; | |||
| }; | |||
| @@ -1,172 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "OpDefProtos"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| import "attr_value.proto"; | |||
| import "types.proto"; | |||
| // Defines an operation. A NodeDef in a GraphDef specifies an Op by | |||
| // using the "op" field which should match the name of a OpDef. | |||
| // LINT.IfChange | |||
| message OpDef { | |||
| // Op names starting with an underscore are reserved for internal use. | |||
| // Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9_]*". | |||
| string name = 1; | |||
| // For describing inputs and outputs. | |||
| message ArgDef { | |||
| // Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*". | |||
| string name = 1; | |||
| // Human readable description. | |||
| string description = 2; | |||
| // Describes the type of one or more tensors that are accepted/produced | |||
| // by this input/output arg. The only legal combinations are: | |||
| // * For a single tensor: either the "type" field is set or the | |||
| // "type_attr" field is set to the name of an attr with type "type". | |||
| // * For a sequence of tensors with the same type: the "number_attr" | |||
| // field will be set to the name of an attr with type "int", and | |||
| // either the "type" or "type_attr" field will be set as for | |||
| // single tensors. | |||
| // * For a sequence of tensors, the "type_list_attr" field will be set | |||
| // to the name of an attr with type "list(type)". | |||
| DataType type = 3; | |||
| string type_attr = 4; // if specified, attr must have type "type" | |||
| string number_attr = 5; // if specified, attr must have type "int" | |||
| // If specified, attr must have type "list(type)", and none of | |||
| // type, type_attr, and number_attr may be specified. | |||
| string type_list_attr = 6; | |||
| // For inputs: if true, the inputs are required to be refs. | |||
| // By default, inputs can be either refs or non-refs. | |||
| // For outputs: if true, outputs are refs, otherwise they are not. | |||
| bool is_ref = 16; | |||
| }; | |||
| // Description of the input(s). | |||
| repeated ArgDef input_arg = 2; | |||
| // Description of the output(s). | |||
| repeated ArgDef output_arg = 3; | |||
| // Description of the graph-construction-time configuration of this | |||
| // Op. That is to say, this describes the attr fields that will | |||
| // be specified in the NodeDef. | |||
| message AttrDef { | |||
| // A descriptive name for the argument. May be used, e.g. by the | |||
| // Python client, as a keyword argument name, and so should match | |||
| // the regexp "[a-z][a-z0-9_]+". | |||
| string name = 1; | |||
| // One of the type names from attr_value.proto ("string", "list(string)", | |||
| // "int", etc.). | |||
| string type = 2; | |||
| // A reasonable default for this attribute if the user does not supply | |||
| // a value. If not specified, the user must supply a value. | |||
| AttrValue default_value = 3; | |||
| // Human-readable description. | |||
| string description = 4; | |||
| // --- Constraints --- | |||
| // These constraints are only in effect if specified. Default is no | |||
| // constraints. | |||
| // For type == "int", this is a minimum value. For "list(___)" | |||
| // types, this is the minimum length. | |||
| bool has_minimum = 5; | |||
| int64 minimum = 6; | |||
| // The set of allowed values. Has type that is the "list" version | |||
| // of the "type" field above (uses the "list" field of AttrValue). | |||
| // If type == "type" or "list(type)" above, then the "type" field | |||
| // of "allowed_values.list" has the set of allowed DataTypes. | |||
| // If type == "string" or "list(string)", then the "s" field of | |||
| // "allowed_values.list" has the set of allowed strings. | |||
| AttrValue allowed_values = 7; | |||
| } | |||
| repeated AttrDef attr = 4; | |||
| // Optional deprecation based on GraphDef versions. | |||
| OpDeprecation deprecation = 8; | |||
| // One-line human-readable description of what the Op does. | |||
| string summary = 5; | |||
| // Additional, longer human-readable description of what the Op does. | |||
| string description = 6; | |||
| // ------------------------------------------------------------------------- | |||
| // Which optimizations this operation can participate in. | |||
| // True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs) | |||
| bool is_commutative = 18; | |||
| // If is_aggregate is true, then this operation accepts N >= 2 | |||
| // inputs and produces 1 output all of the same type. Should be | |||
| // associative and commutative, and produce output with the same | |||
| // shape as the input. The optimizer may replace an aggregate op | |||
| // taking input from multiple devices with a tree of aggregate ops | |||
| // that aggregate locally within each device (and possibly within | |||
| // groups of nearby devices) before communicating. | |||
| bool is_aggregate = 16; // for things like add | |||
| // Other optimizations go here, like | |||
| // can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc. | |||
| // ------------------------------------------------------------------------- | |||
| // Optimization constraints. | |||
| // Ops are marked as stateful if their behavior depends on some state beyond | |||
| // their input tensors (e.g. variable reading op) or if they have | |||
| // a side-effect (e.g. printing or asserting ops). Equivalently, stateless ops | |||
| // must always produce the same output for the same input and have | |||
| // no side-effects. | |||
| // | |||
| // By default Ops may be moved between devices. Stateful ops should | |||
| // either not be moved, or should only be moved if that state can also | |||
| // be moved (e.g. via some sort of save / restore). | |||
| // Stateful ops are guaranteed to never be optimized away by Common | |||
| // Subexpression Elimination (CSE). | |||
| bool is_stateful = 17; // for things like variables, queue | |||
| // ------------------------------------------------------------------------- | |||
| // Non-standard options. | |||
| // By default, all inputs to an Op must be initialized Tensors. Ops | |||
| // that may initialize tensors for the first time should set this | |||
| // field to true, to allow the Op to take an uninitialized Tensor as | |||
| // input. | |||
| bool allows_uninitialized_input = 19; // for Assign, etc. | |||
| }; | |||
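| // A sketch of a complete OpDef in text form (hypothetical "MyAddN" op, for | |||
| // illustration only), tying the pieces above together: | |||
| //   name: "MyAddN" | |||
| //   input_arg { name: "inputs" type_attr: "T" number_attr: "N" } | |||
| //   output_arg { name: "sum" type_attr: "T" } | |||
| //   attr { name: "N" type: "int" has_minimum: true minimum: 2 } | |||
| //   attr { name: "T" type: "type" } | |||
| //   summary: "Adds all input tensors element-wise." | |||
| //   is_commutative: true | |||
| //   is_aggregate: true | |||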
| // LINT.ThenChange( | |||
| // https://www.tensorflow.org/code/tensorflow/core/framework/op_def_util.cc) | |||
| // Information about version-dependent deprecation of an op | |||
| message OpDeprecation { | |||
| // First GraphDef version at which the op is disallowed. | |||
| int32 version = 1; | |||
| // Explanation of why it was deprecated and what to use instead. | |||
| string explanation = 2; | |||
| }; | |||
| // A collection of OpDefs | |||
| message OpList { | |||
| repeated OpDef op = 1; | |||
| }; | |||
| @@ -1,37 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "ResourceHandle"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| // Protocol buffer representing a handle to a tensorflow resource. Handles are | |||
| // not valid across executions, but can be serialized back and forth from within | |||
| // a single run. | |||
| message ResourceHandleProto { | |||
| // Unique name for the device containing the resource. | |||
| string device = 1; | |||
| // Container in which this resource is placed. | |||
| string container = 2; | |||
| // Unique name of this resource. | |||
| string name = 3; | |||
| // Hash code for the type of the resource. Is only valid in the same device | |||
| // and in the same execution. | |||
| uint64 hash_code = 4; | |||
| // For debug-only, the name of the type pointed to by this handle, if | |||
| // available. | |||
| string maybe_type_name = 5; | |||
| }; | |||
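| // Illustrative example (not part of the schema): a handle to a variable | |||
| // resource might look, in text form, roughly like | |||
| //   device: "/job:localhost/replica:0/task:0/device:CPU:0" | |||
| //   container: "localhost" | |||
| //   name: "my_variable" | |||
| // with hash_code identifying the resource's type within that execution. | |||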
| @@ -1,102 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "TensorProtos"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| import "resource_handle.proto"; | |||
| import "tensor_shape.proto"; | |||
| import "types.proto"; | |||
| // Protocol buffer representing a tensor. | |||
| message TensorProto { | |||
| DataType dtype = 1; | |||
| // Shape of the tensor. | |||
| TensorShapeProto tensor_shape = 2; | |||
| // Only one of the representations below is set: either "tensor_content" or | |||
| // one of the "xxx_val" attributes. We are not using oneof because, as oneofs | |||
| // cannot contain repeated fields, it would require another extra set of messages. | |||
| // Version number. | |||
| // | |||
| // In version 0, if the "repeated xxx" representations contain only one | |||
| // element, that element is repeated to fill the shape. This makes it easy | |||
| // to represent a constant Tensor with a single value. | |||
| int32 version_number = 3; | |||
| // Serialized raw tensor content from either Tensor::AsProtoTensorContent or | |||
| // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation | |||
| // can be used for all tensor types. The purpose of this representation is to | |||
| // reduce serialization overhead during RPC call by avoiding serialization of | |||
| // many repeated small items. | |||
| bytes tensor_content = 4; | |||
| // Type specific representations that make it easy to create tensor protos in | |||
| // all languages. Only the representation corresponding to "dtype" can | |||
| // be set. The values hold the flattened representation of the tensor in | |||
| // row major order. | |||
| // DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll | |||
| // have some pointless zero padding for each value here. | |||
| repeated int32 half_val = 13 [packed = true]; | |||
| // DT_FLOAT. | |||
| repeated float float_val = 5 [packed = true]; | |||
| // DT_DOUBLE. | |||
| repeated double double_val = 6 [packed = true]; | |||
| // DT_INT32, DT_INT16, DT_INT8, DT_UINT8. | |||
| repeated int32 int_val = 7 [packed = true]; | |||
| // DT_STRING | |||
| repeated bytes string_val = 8; | |||
| // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real | |||
| // and imaginary parts of i-th single precision complex. | |||
| repeated float scomplex_val = 9 [packed = true]; | |||
| // DT_INT64 | |||
| repeated int64 int64_val = 10 [packed = true]; | |||
| // DT_BOOL | |||
| repeated bool bool_val = 11 [packed = true]; | |||
| // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real | |||
| // and imaginary parts of i-th double precision complex. | |||
| repeated double dcomplex_val = 12 [packed = true]; | |||
| // DT_RESOURCE | |||
| repeated ResourceHandleProto resource_handle_val = 14; | |||
| // DT_VARIANT | |||
| repeated VariantTensorDataProto variant_val = 15; | |||
| // DT_UINT32 | |||
| repeated uint32 uint32_val = 16 [packed = true]; | |||
| // DT_UINT64 | |||
| repeated uint64 uint64_val = 17 [packed = true]; | |||
| }; | |||
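| // Illustrative example (not part of the schema): a 2 x 2 float constant can be | |||
| // written with the type-specific field, e.g. | |||
| //   dtype: DT_FLOAT | |||
| //   tensor_shape { dim { size: 2 } dim { size: 2 } } | |||
| //   float_val: [1.0, 2.0, 3.0, 4.0] | |||
| // or, equivalently, by packing the raw bytes of the four floats into | |||
| // "tensor_content". Under version 0, a single float_val entry would be | |||
| // repeated to fill the whole 2 x 2 shape. | |||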
| // Protocol buffer representing the serialization format of DT_VARIANT tensors. | |||
| message VariantTensorDataProto { | |||
| // Name of the type of objects being serialized. | |||
| string type_name = 1; | |||
| // Portions of the object that are not Tensors. | |||
| bytes metadata = 2; | |||
| // Tensors contained within objects being serialized. | |||
| repeated TensorProto tensors = 3; | |||
| } | |||
| @@ -1,53 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| // Protocol buffer representing the shape of tensors. | |||
| syntax = "proto3"; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "TensorShapeProtos"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| package domi.tensorflow; | |||
| // Dimensions of a tensor. | |||
| message TensorShapeProto { | |||
| // One dimension of the tensor. | |||
| message Dim { | |||
| // Size of the tensor in that dimension. | |||
| // This value must be >= -1, but values of -1 are reserved for "unknown" | |||
| // shapes (values of -1 mean "unknown" dimension). Certain wrappers | |||
| // that work with TensorShapeProto may fail at runtime when deserializing | |||
| // a TensorShapeProto containing a dim value of -1. | |||
| int64 size = 1; | |||
| // Optional name of the tensor dimension. | |||
| string name = 2; | |||
| }; | |||
| // Dimensions of the tensor, such as {"input", 30}, {"output", 40} | |||
| // for a 30 x 40 2D tensor. If an entry has size -1, this | |||
| // corresponds to a dimension of unknown size. The names are | |||
| // optional. | |||
| // | |||
| // The order of entries in "dim" matters: It indicates the layout of the | |||
| // values in the tensor in-memory representation. | |||
| // | |||
| // The first entry in "dim" is the outermost dimension used to layout the | |||
| // values, the last entry is the innermost dimension. This matches the | |||
| // in-memory layout of RowMajor Eigen tensors. | |||
| // | |||
| // If "dim.size()" > 0, "unknown_rank" must be false. | |||
| repeated Dim dim = 2; | |||
| // If true, the number of dimensions in the shape is unknown. | |||
| // | |||
| // If true, "dim.size()" must be 0. | |||
| bool unknown_rank = 3; | |||
| }; | |||
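| // Illustrative example (not part of the schema): the 30 x 40 tensor mentioned | |||
| // above is written as | |||
| //   dim { size: 30 name: "input" } dim { size: 40 name: "output" } | |||
| // while a tensor of completely unknown shape carries no dims and sets | |||
| //   unknown_rank: true | |||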
| @@ -1,82 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "TypesProtos"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| // LINT.IfChange | |||
| enum DataType { | |||
| // Not a legal value for DataType. Used to indicate a DataType field | |||
| // has not been set. | |||
| DT_INVALID = 0; | |||
| // Data types that all computation devices are expected to be | |||
| // capable to support. | |||
| DT_FLOAT = 1; | |||
| DT_DOUBLE = 2; | |||
| DT_INT32 = 3; | |||
| DT_UINT8 = 4; | |||
| DT_INT16 = 5; | |||
| DT_INT8 = 6; | |||
| DT_STRING = 7; | |||
| DT_COMPLEX64 = 8; // Single-precision complex | |||
| DT_INT64 = 9; | |||
| DT_BOOL = 10; | |||
| DT_QINT8 = 11; // Quantized int8 | |||
| DT_QUINT8 = 12; // Quantized uint8 | |||
| DT_QINT32 = 13; // Quantized int32 | |||
| DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops. | |||
| DT_QINT16 = 15; // Quantized int16 | |||
| DT_QUINT16 = 16; // Quantized uint16 | |||
| DT_UINT16 = 17; | |||
| DT_COMPLEX128 = 18; // Double-precision complex | |||
| DT_HALF = 19; | |||
| DT_RESOURCE = 20; | |||
| DT_VARIANT = 21; // Arbitrary C++ data types | |||
| DT_UINT32 = 22; | |||
| DT_UINT64 = 23; | |||
| // Do not use! These are only for parameters. Every enum above | |||
| // should have a corresponding value below (verified by types_test). | |||
| DT_FLOAT_REF = 101; | |||
| DT_DOUBLE_REF = 102; | |||
| DT_INT32_REF = 103; | |||
| DT_UINT8_REF = 104; | |||
| DT_INT16_REF = 105; | |||
| DT_INT8_REF = 106; | |||
| DT_STRING_REF = 107; | |||
| DT_COMPLEX64_REF = 108; | |||
| DT_INT64_REF = 109; | |||
| DT_BOOL_REF = 110; | |||
| DT_QINT8_REF = 111; | |||
| DT_QUINT8_REF = 112; | |||
| DT_QINT32_REF = 113; | |||
| DT_BFLOAT16_REF = 114; | |||
| DT_QINT16_REF = 115; | |||
| DT_QUINT16_REF = 116; | |||
| DT_UINT16_REF = 117; | |||
| DT_COMPLEX128_REF = 118; | |||
| DT_HALF_REF = 119; | |||
| DT_RESOURCE_REF = 120; | |||
| DT_VARIANT_REF = 121; | |||
| DT_UINT32_REF = 122; | |||
| DT_UINT64_REF = 123; | |||
| } | |||
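| // Note that every DT_*_REF value is its non-reference counterpart plus 100 | |||
| // (e.g. DT_FLOAT = 1, DT_FLOAT_REF = 101), so a reference type can be mapped | |||
| // back to its base type by subtracting that offset. | |||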
| // LINT.ThenChange( | |||
| // https://www.tensorflow.org/code/tensorflow/c/c_api.h, | |||
| // https://www.tensorflow.org/code/tensorflow/go/tensor.go, | |||
| // https://www.tensorflow.org/code/tensorflow/core/framework/tensor.cc, | |||
| // https://www.tensorflow.org/code/tensorflow/core/framework/types.h, | |||
| // https://www.tensorflow.org/code/tensorflow/core/framework/types.cc, | |||
| // https://www.tensorflow.org/code/tensorflow/python/framework/dtypes.py, | |||
| // https://www.tensorflow.org/code/tensorflow/python/framework/function.py) | |||
| @@ -1,39 +0,0 @@ | |||
| /** | |||
| * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow | |||
| * | |||
| * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. | |||
| * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). | |||
| * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. | |||
| */ | |||
| syntax = "proto3"; | |||
| package domi.tensorflow; | |||
| option cc_enable_arenas = true; | |||
| option java_outer_classname = "VersionsProtos"; | |||
| option java_multiple_files = true; | |||
| option java_package = "org.tensorflow.framework"; | |||
| // Version information for a piece of serialized data | |||
| // | |||
| // There are different types of versions for each type of data | |||
| // (GraphDef, etc.), but they all have the same common shape | |||
| // described here. | |||
| // | |||
| // Each consumer has "consumer" and "min_producer" versions (specified | |||
| // elsewhere). A consumer is allowed to consume this data if | |||
| // | |||
| // producer >= min_producer | |||
| // consumer >= min_consumer | |||
| // consumer not in bad_consumers | |||
| // | |||
| message VersionDef { | |||
| // The version of the code that produced this data. | |||
| int32 producer = 1; | |||
| // Any consumer below this version is not allowed to consume this data. | |||
| int32 min_consumer = 2; | |||
| // Specific consumer versions which are disallowed (e.g. due to bugs). | |||
| repeated int32 bad_consumers = 3; | |||
| }; | |||
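| // Worked example (illustrative only): for data carrying | |||
| //   producer: 27  min_consumer: 12  bad_consumers: [26] | |||
| // a consumer at version 26 is rejected (listed in bad_consumers), one at | |||
| // version 11 is rejected (below min_consumer), and one at version 30 whose | |||
| // min_producer is <= 27 may consume the data. | |||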
| @@ -21,7 +21,7 @@ | |||
| namespace ge { | |||
| class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY TBEKernelStore : public KernelStore { | |||
| class TBEKernelStore : public KernelStore { | |||
| public: | |||
| TBEKernelStore(); | |||
| ~TBEKernelStore() {} | |||