
Merge branch 'master' of gitee.com:mindspore/graphengine into master

pull/1777/head
宇世康 4 years ago
commit 02edc65d7c
69 changed files with 648 additions and 789 deletions
1. +6 -6  cmake/external_libs/protobuf_shared.cmake
2. +4 -6  cmake/external_libs/protobuf_static.cmake
3. +5 -7  cmake/external_libs/protoc.cmake
4. +0 -5  ge/common/auth/file_saver.cc
5. +4 -5  ge/common/ge/plugin_manager.cc
6. +1 -2  ge/common/helper/model_cache_helper.cc
7. +5 -5  ge/common/helper/model_helper.cc
8. +4 -19  ge/common/model_parser/model_parser.cc
9. +1 -2  ge/common/model_parser/model_parser.h
10. +2 -2  ge/common/model_saver.cc
11. +3 -0  ge/common/profiling/ge_profiling.cc
12. +70 -51  ge/common/util.cc
13. +1 -15  ge/engine_manager/dnnengine_manager.cc
14. +3 -5  ge/executor/ge_executor.cc
15. +9 -8  ge/graph/build/memory/graph_mem_assigner.cc
16. +0 -15  ge/graph/common/omg_util.cc
17. +0 -9  ge/graph/common/omg_util.h
18. +4 -10  ge/graph/load/graph_loader.cc
19. +1 -2  ge/graph/load/graph_loader.h
20. +17 -15  ge/graph/load/model_manager/davinci_model.cc
21. +1 -1  ge/graph/load/model_manager/davinci_model.h
22. +11 -24  ge/graph/manager/graph_manager.cc
23. +1 -1  ge/graph/manager/graph_manager.h
24. +0 -2  ge/graph/optimize/graph_optimize.cc
25. +22 -1  ge/graph/partition/dynamic_shape_partition.cc
26. +1 -1  ge/graph/partition/dynamic_shape_partition.h
27. +8 -30  ge/graph/passes/mark_force_unknown_for_cond_pass.cc
28. +6 -0  ge/graph/passes/mark_graph_unknown_status_pass.cc
29. +2 -3  ge/graph/passes/merge_to_stream_merge_pass.cc
30. +2 -2  ge/graph/passes/multi_batch_clone_pass.cc
31. +9 -1  ge/graph/passes/next_iteration_pass.cc
32. +8 -8  ge/graph/passes/switch_to_stream_switch_pass.cc
33. +52 -52  ge/graph/preprocess/graph_preprocess.cc
34. +4 -3  ge/graph/preprocess/insert_op/ge_aipp_op.cc
35. +4 -4  ge/graph/preprocess/insert_op/util_insert_aipp_op.cc
36. +4 -4  ge/graph/preprocess/multi_batch_copy_graph.cc
37. +4 -7  ge/graph/preprocess/multi_batch_options.cc
38. +58 -8  ge/hybrid/executor/node_state.cc
39. +6 -8  ge/hybrid/executor/node_state.h
40. +19 -8  ge/hybrid/executor/subgraph_context.cc
41. +3 -2  ge/hybrid/executor/subgraph_context.h
42. +3 -15  ge/hybrid/executor/subgraph_executor.cc
43. +0 -1  ge/hybrid/executor/subgraph_executor.h
44. +2 -3  ge/hybrid/model/node_item.cc
45. +2 -3  ge/hybrid/model/node_item.h
46. +7 -10  ge/hybrid/node_executor/task_context.cc
47. +1 -3  ge/hybrid/node_executor/task_context.h
48. +5 -4  ge/ir_build/attr_options/keep_dtype_option.cc
49. +1 -2  ge/ir_build/ge_ir_build.cc
50. +22 -20  ge/ir_build/option_utils.cc
51. +7 -40  ge/offline/main.cc
52. +21 -21  ge/offline/single_op_parser.cc
53. +15 -15  ge/session/omg.cc
54. +2 -2  ge/single_op/single_op_manager.cc
55. +2 -0  inc/framework/common/profiling/ge_profiling.h
56. +0 -10  inc/framework/common/util.h
57. +1 -1  metadef
58. +1 -1  parser
59. +0 -112  tests/ut/common/graph/testcase/ge_graph/ge_graph_anchor_unittest.cc
60. +2 -1  tests/ut/common/graph/testcase/ge_graph/ge_model_serialize_unittest.cc
61. +1 -17  tests/ut/common/graph/testcase/ge_graph/ge_tensor_unittest.cc
62. +1 -0  tests/ut/ge/CMakeLists.txt
63. +4 -17  tests/ut/ge/graph/manager/graph_manager_unittest.cc
64. +155 -44  tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc
65. +11 -18  tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc
66. +14 -11  tests/ut/ge/hybrid/ge_hybrid_unittest.cc
67. +0 -5  tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc
68. +3 -14  tests/ut/ge/hybrid/node_executor/hccl/hccl_node_executor_unittest.cc
69. +0 -40  tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc

+6 -6  cmake/external_libs/protobuf_shared.cmake

@@ -11,14 +11,14 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR
message(STATUS "No install prefix selected, default to ${CMAKE_INSTALL_PREFIX}.")
endif()
if (GE_PB_PKG)
set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz")
set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.13.0.tar.gz")
else()
if (ENABLE_GITEE)
set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz")
set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236")
set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz")
set(MD5 "f4489cb88922ad9c58cbe3308d59cee5")
else()
set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz")
set(MD5 "3d9e32700639618a4d2d342c99d4507a")
set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz")
set(MD5 "1a6274bc4a65b55a6fa70e264d796490")
endif ()
endif()

@@ -58,7 +58,7 @@ target_include_directories(ascend_protobuf INTERFACE ${PROTOBUF_SHARED_PKG_DIR}/
set(INSTALL_BASE_DIR "")
set(INSTALL_LIBRARY_DIR lib)

install(FILES ${PROTOBUF_SHARED_PKG_DIR}/${CMAKE_INSTALL_LIBDIR}/ascend_protobuf.so.3.8.0.0 OPTIONAL
install(FILES ${PROTOBUF_SHARED_PKG_DIR}/${CMAKE_INSTALL_LIBDIR}/ascend_protobuf.so.3.13.0.0 OPTIONAL
DESTINATION ${INSTALL_LIBRARY_DIR})
install(FILES ${PROTOBUF_SHARED_PKG_DIR}/${CMAKE_INSTALL_LIBDIR}/ascend_protobuf.so OPTIONAL
DESTINATION ${INSTALL_LIBRARY_DIR})


+4 -6  cmake/external_libs/protobuf_static.cmake

@@ -16,11 +16,11 @@ if(GE_PB_PKG)
set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz")
else()
if (ENABLE_GITEE)
set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz")
set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236")
set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz")
set(MD5 "f4489cb88922ad9c58cbe3308d59cee5")
else()
set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz")
set(MD5 "3d9e32700639618a4d2d342c99d4507a")
set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz")
set(MD5 "1a6274bc4a65b55a6fa70e264d796490")
endif ()
endif()

@@ -29,8 +29,6 @@ set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack")
set(PROTOBUF_STATIC_PKG_DIR ${CMAKE_INSTALL_PREFIX}/protobuf_static)
ExternalProject_Add(protobuf_static_build
URL ${REQ_URL}
#URL /home/txd/workspace/linux_cmake/pkg/protobuf-3.8.0.tar.gz
#SOURCE_DIR ${METADEF_DIR}/../../third_party/protobuf/src/protobuf-3.8.0
TLS_VERIFY OFF
CONFIGURE_COMMAND ${CMAKE_COMMAND}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}


+5 -7  cmake/external_libs/protoc.cmake

@@ -13,14 +13,14 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR
endif()

if(GE_PB_PKG)
set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz")
set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.13.0.tar.gz")
else()
if (ENABLE_GITEE)
set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz")
set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236")
set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz")
set(MD5 "f4489cb88922ad9c58cbe3308d59cee5")
else()
set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz")
set(MD5 "3d9e32700639618a4d2d342c99d4507a")
set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz")
set(MD5 "1a6274bc4a65b55a6fa70e264d796490")
endif ()
endif()

@@ -28,8 +28,6 @@ set(protobuf_CXXFLAGS "-Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fst
set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack")
ExternalProject_Add(protoc_build
URL ${REQ_URL}
#URL /home/txd/workspace/linux_cmake/pkg/protobuf-3.8.0.tar.gz
#SOURCE_DIR ${GE_CODE_DIR}/../third_party/protobuf/src/protobuf-3.8.0
TLS_VERIFY OFF
CONFIGURE_COMMAND ${CMAKE_COMMAND} -Dprotobuf_WITH_ZLIB=OFF -Dprotobuf_BUILD_TESTS=OFF -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS=${protobuf_CXXFLAGS} -DCMAKE_CXX_LDFLAGS=${protobuf_LDFLAGS} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}/protoc <SOURCE_DIR>/cmake
BUILD_COMMAND $(MAKE)


+0 -5  ge/common/auth/file_saver.cc

@@ -290,7 +290,6 @@ FileSaver::SaveToFile(const string &file_path, const ge::ModelData &model, const
copy_header_ret);

file_header.length = model.model_len;
file_header.is_encrypt = ModelEncryptType::UNENCRYPTED;

const Status ret = SaveWithFileHeader(file_path, file_header, model.model_data, file_header.length);
if (ret != SUCCESS) {
@@ -305,8 +304,6 @@ FileSaver::SaveToFile(const string &file_path, const ge::ModelData &model, const
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status
FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header, ModelPartitionTable &model_partition_table,
const std::vector<ModelPartition> &partition_datas) {
file_header.is_encrypt = ModelEncryptType::UNENCRYPTED;

const Status ret = SaveWithFileHeader(file_path, file_header, model_partition_table, partition_datas);
GE_CHK_BOOL_RET_STATUS(ret == SUCCESS, FAILED, "save file failed, file_path:%s, file header len:%u.",
file_path.c_str(), file_header.length);
@@ -317,8 +314,6 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status
FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header,
vector<ModelPartitionTable *> &model_partition_tables,
const vector<vector<ModelPartition>> &all_partition_datas) {
file_header.is_encrypt = ModelEncryptType::UNENCRYPTED;

const Status ret = SaveWithFileHeader(file_path, file_header, model_partition_tables, all_partition_datas);
GE_CHK_BOOL_RET_STATUS(ret == SUCCESS, FAILED, "save file failed, file_path:%s, file header len:%u.",
file_path.c_str(), file_header.length);


+4 -5  ge/common/ge/plugin_manager.cc

@@ -126,8 +126,8 @@ Status PluginManager::LoadSo(const string &path, const vector<string> &func_chec
if (handle == nullptr) {
const char *error = mmDlerror();
GE_IF_BOOL_EXEC(error == nullptr, error = "");
ErrorManager::GetInstance().ATCReportErrMessage("E19012", {"function", "reason"},
{"mmDlopen", "shared library path is " + FmtToStr(file_path_dlopen) + ". Errormessage" + FmtToStr(error)});
REPORT_INNER_ERROR("E19999", "DLOpen SharedLibraryPath failed, path[%s]. Errormessage[%s]!",
file_path_dlopen.c_str(), error);
GELOGE(ACL_ERROR_GE_PLGMGR_PATH_INVALID,
"[DLOpen][SharedLibraryPath]Failed, path[%s]. Errormessage[%s]!",
file_path_dlopen.c_str(), error);
@@ -141,9 +141,8 @@ Status PluginManager::LoadSo(const string &path, const vector<string> &func_chec
if (real_fn == nullptr) {
const char *error = mmDlerror();
GE_IF_BOOL_EXEC(error == nullptr, error = "");
ErrorManager::GetInstance().ATCReportErrMessage("E19012", {"function", "reason"},
{"mmDlsym", FmtToStr(func_name) + " is skipped since function" +
FmtToStr(func_name) + " is not existed!"});
REPORT_INNER_ERROR("E19999", "[Check][So]%s is skipped since function %s is not existed! errmsg:%s",
func_name.c_str(), func_name.c_str(), error);
GELOGE(ACL_ERROR_GE_PLGMGR_PATH_INVALID,
"[Check][So]%s is skipped since function %s is not existed! errmsg:%s",
func_name.c_str(), func_name.c_str(), error);


+1 -2  ge/common/helper/model_cache_helper.cc

@@ -1672,10 +1672,9 @@ Status ModelCacheHelper::LoadOmModelFromCache(GeModelPtr &ge_model) const {
}
GELOGI("load model data from file: %s", om_path.c_str());
Status ret;
string key_path;
int32_t priority = 0;
ModelData model_data;
ret = ModelParserBase::LoadFromFile(om_path.c_str(), key_path.c_str(), priority, model_data);
ret = ModelParserBase::LoadFromFile(om_path.c_str(), priority, model_data);
if (ret != SUCCESS) {
GELOGW("LoadOmModelFromCache: Load model from file failed. ret = %u", ret);
return ret;


+5 -5  ge/common/helper/model_helper.cc

@@ -39,8 +39,6 @@ Status ModelHelper::SaveModelPartition(std::shared_ptr<OmFileSaveHelper> &om_fil
const uint8_t *data, size_t size, size_t model_index) {
if (size < 1 || size > UINT32_MAX) {
GELOGE(PARAM_INVALID, "[Add][ModelPartition]Failed, partition size %zu invalid", size);
REPORT_INNER_ERROR("E19999", "Add model partition failed, partition size %zu "
"invalid", size);
if (size > UINT32_MAX) {
string item = "item";
if (type == MODEL_DEF) {
@@ -57,6 +55,8 @@ Status ModelHelper::SaveModelPartition(std::shared_ptr<OmFileSaveHelper> &om_fil
ErrorManager::GetInstance().ATCReportErrMessage("E19023", {"size", "item", "maxsize"},
{std::to_string(size), item, std::to_string(UINT32_MAX)});
}
REPORT_INNER_ERROR("E19999", "Add model partition failed, partition size %zu "
"invalid", size);
return PARAM_INVALID;
}
if (data == nullptr) {
@@ -1013,7 +1013,7 @@ Status ModelTool::GetModelInfoFromOm(const char *model_file, ge::proto::ModelDef
ge::ModelData model;
int32_t priority = 0;

Status ret = ModelParserBase::LoadFromFile(model_file, "", priority, model);
Status ret = ModelParserBase::LoadFromFile(model_file, priority, model);
if (ret != SUCCESS) {
GELOGE(ret, "[Load][ModelInfo]Failed from file %s, error_code %u", model_file, ret);
REPORT_CALL_ERROR("E19999", "Load model info failed from file %s, error_code %u",
@@ -1033,7 +1033,7 @@ Status ModelTool::GetModelInfoFromOm(const char *model_file, ge::proto::ModelDef
ret = ModelParserBase::ParseModelContent(model, model_data, model_len);
if (ret != SUCCESS) {
ErrorManager::GetInstance().ATCReportErrMessage("E10003",
{"parameter", "value", "reason"}, {"om", model_file, "invalid om file"});
{"parameter", "value", "reason"}, {"om", model_file, "invalid om file, can't be parsed"});
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"[Parse][ModelContent]Failed because of invalid om file %s, please check om param",
model_file);
@@ -1072,7 +1072,7 @@ Status ModelTool::GetModelInfoFromPbtxt(const char *model_file, ge::proto::Model
ge::ModelData model;
int32_t priority = 0;

Status ret = ModelParserBase::LoadFromFile(model_file, "", priority, model);
Status ret = ModelParserBase::LoadFromFile(model_file, priority, model);
auto free_model_data = [](void **ptr) -> void {
if (ptr != nullptr && *ptr != nullptr) {
delete[] reinterpret_cast<char *>(*ptr);


+4 -19  ge/common/model_parser/model_parser.cc

@@ -27,7 +27,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelParserBase::ModelParserBas
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelParserBase::~ModelParserBase() {}

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFromFile(const char *model_path,
const char *key, int32_t priority,
int32_t priority,
ge::ModelData &model_data) {
std::string real_path = RealPath(model_path);
if (real_path.empty()) {
@@ -77,7 +77,6 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFro
model_data.model_data = data;
model_data.model_len = len;
model_data.priority = priority;
model_data.key = (key == nullptr) ? "" : key;

return SUCCESS;
}
@@ -113,23 +112,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseMo

// Get data address
uint8_t *data = reinterpret_cast<uint8_t *>(model.model_data) + sizeof(ModelFileHeader);
if (file_header->is_encrypt == ModelEncryptType::UNENCRYPTED) { // Unencrypted model
if (!model.key.empty()) {
REPORT_INPUT_ERROR("E10003", std::vector<std::string>({"parameter", "value", "reason"}),
std::vector<std::string>({"om", model.om_name.c_str(), "invalid om file"}));
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"[Check][Param] Invalid param, model is unencrypted, but key is not empty.");
return ACL_ERROR_GE_PARAM_INVALID;
}
model_data = data;
model_len = file_header->length;
GELOGD("Model_len is %u, model_file_head_len is %zu.", model_len, sizeof(ModelFileHeader));
} else {
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Param]Invalid, model encrypt type not supported");
REPORT_INPUT_ERROR("E10003", std::vector<std::string>({"parameter", "value", "reason"}),
std::vector<std::string>({"om", model.om_name.c_str(), "invalid om file"}));
res = ACL_ERROR_GE_PARAM_INVALID;
}
model_data = data;
model_len = file_header->length;
GELOGD("Model_len is %u, model_file_head_len is %zu.", model_len, sizeof(ModelFileHeader));

return res;
}


+1 -2  ge/common/model_parser/model_parser.h

@@ -43,12 +43,11 @@ class ModelParserBase {
* @ingroup hiai
* @brief Parsing a model file
* @param [in] model_file model path
* @param [in] model_key model secret key
* @param [in] priority modle priority
* @param [out] model_data model data
* @return Status result
*/
static Status LoadFromFile(const char *model_file, const char *model_key, int32_t priority,
static Status LoadFromFile(const char *model_file, int32_t priority,
ge::ModelData &model_data);

/**

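The hunk above drops the decrypt-key argument from ModelParserBase::LoadFromFile, leaving a path-plus-priority interface. A minimal call-site sketch against the new signature follows (illustrative only, not part of this commit; the include path and the ge namespace qualification of ModelParserBase are assumptions based on the file's location in the tree):

// Sketch of a caller after this change; include path assumed from ge/common/model_parser/model_parser.h.
#include "common/model_parser/model_parser.h"

ge::Status LoadOmModel(const char *om_path, ge::ModelData &model_data) {
  const int32_t priority = 0;
  // Old call: LoadFromFile(om_path, key_path.c_str(), priority, model_data);
  // New call: the model_key parameter is gone.
  return ge::ModelParserBase::LoadFromFile(om_path, priority, model_data);
}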

+2 -2  ge/common/model_saver.cc

@@ -41,12 +41,12 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFi
try {
model_str = model.dump(kInteval, ' ', false, Json::error_handler_t::ignore);
} catch (std::exception &e) {
ErrorManager::GetInstance().ATCReportErrMessage("E19007", {"exception"}, {e.what()});
REPORT_INNER_ERROR("E19999", "Failed to convert JSON to string, reason: %s, savefile:%s.", e.what(), file_path);
GELOGE(FAILED, "[Convert][File]Failed to convert JSON to string, file %s, reason %s",
file_path, e.what());
return FAILED;
} catch (...) {
ErrorManager::GetInstance().ATCReportErrMessage("E19008");
REPORT_INNER_ERROR("E19999", "Failed to convert JSON to string, savefile:%s.", file_path);
GELOGE(FAILED, "[Convert][File]Failed to convert JSON to string, file %s", file_path);
return FAILED;
}


+3 -0  ge/common/profiling/ge_profiling.cc

@@ -216,3 +216,6 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le
return ge::SUCCESS;
}

GE_FUNC_VISIBILITY ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream) {
return ge::SUCCESS;
}

+70 -51  ge/common/util.cc

@@ -70,39 +70,6 @@ static bool ReadProtoFromCodedInputStream(CodedInputStream &coded_stream, Messag
return proto->ParseFromCodedStream(&coded_stream);
}

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromBinaryFile(const char *file, Message *proto) {
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((file == nullptr || proto == nullptr), return false,
"Input parameter file or proto is nullptr!");

std::string real_path = RealPath(file);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(real_path.empty(), return false, "pb file path '%s' not valid", file);

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(GetFileLength(real_path) == kFileSizeOutLimitedOrOpenFailed, return false,
"file size not valid.");

std::ifstream fs(real_path, std::ifstream::in | std::ifstream::binary);
if (!fs.is_open()) {
ErrorManager::GetInstance().ATCReportErrMessage("E19001", {"file", "errmsg"}, {file, "ifstream is_open failed"});
GELOGE(ge::FAILED, "[Open][File]Failed, file path %s", file);
return false;
}

google::protobuf::io::IstreamInputStream istream(&fs);
google::protobuf::io::CodedInputStream coded_stream(&istream);

bool ret = ReadProtoFromCodedInputStream(coded_stream, proto);

fs.close();

if (!ret) {
ErrorManager::GetInstance().ATCReportErrMessage("E19005", {"file"}, {file});
GELOGE(ge::FAILED, "[Parse][File]Failed, file %s", file);
return ret;
}

return ret;
}

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromArray(const void *data, int size, Message *proto) {
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((proto == nullptr || data == nullptr || size == 0), return false,
"incorrect parameter. proto is nullptr || data is nullptr || size is 0");
@@ -125,13 +92,13 @@ long GetFileLength(const std::string &input_file) {
return kFileSizeOutLimitedOrOpenFailed, "Open file[%s] failed. errmsg:%s", input_file.c_str(), strerror(errno));

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((file_length == 0),
ErrorManager::GetInstance().ATCReportErrMessage("E19015", {"filepath"}, {input_file});
REPORT_INNER_ERROR("E19999", "file:%s size is 0, not valid", input_file.c_str());
return -1, "File[%s] size is 0, not valid.", input_file.c_str());

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
file_length > kMaxFileSizeLimit, ErrorManager::GetInstance().ATCReportErrMessage(
"E19016", {"filepath", "filesize", "maxlen"},
{input_file, std::to_string(file_length), std::to_string(kMaxFileSizeLimit)});
file_length > kMaxFileSizeLimit,
REPORT_INNER_ERROR("E19999", "file:%s size:%lld is out of limit: %d.", input_file.c_str(), file_length,
kMaxFileSizeLimit);
return kFileSizeOutLimitedOrOpenFailed, "File[%s] size %lld is out of limit: %d.", input_file.c_str(), file_length,
kMaxFileSizeLimit);
return static_cast<long>(file_length);
@@ -227,7 +194,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int CreateDirectory(const std::
int32_t ret = mmMkdir(tmp_dir_path, M_IRUSR | M_IWUSR | M_IXUSR); // 700
if (ret != 0) {
if (errno != EEXIST) {
ErrorManager::GetInstance().ATCReportErrMessage("E19006", {"path"}, {directory_path});
REPORT_CALL_ERROR("E19999",
"Can not create directory %s. Make sure the directory exists and writable. errmsg:%s",
directory_path.c_str(), strerror(errno));
GELOGW("Can not create directory %s. Make sure the directory exists and writable. errmsg:%s",
directory_path.c_str(), strerror(errno));
return ret;
@@ -239,7 +208,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int CreateDirectory(const std::
int32_t ret = mmMkdir(const_cast<char *>(directory_path.c_str()), M_IRUSR | M_IWUSR | M_IXUSR); // 700
if (ret != 0) {
if (errno != EEXIST) {
ErrorManager::GetInstance().ATCReportErrMessage("E19006", {"path"}, {directory_path});
REPORT_CALL_ERROR("E19999",
"Can not create directory %s. Make sure the directory exists and writable. errmsg:%s",
directory_path.c_str(), strerror(errno));
GELOGW("Can not create directory %s. Make sure the directory exists and writable. errmsg:%s",
directory_path.c_str(), strerror(errno));
return ret;
@@ -279,7 +250,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromText(const ch
std::ifstream fs(real_path.c_str(), std::ifstream::in);

if (!fs.is_open()) {
ErrorManager::GetInstance().ATCReportErrMessage("E19017", {"realpth", "protofile"}, {real_path, file});
REPORT_INNER_ERROR("E19999", "open file:%s failed", real_path.c_str());
GELOGE(ge::FAILED, "[Open][ProtoFile]Failed, real path %s, orginal file path %s",
real_path.c_str(), file);
return false;
@@ -374,14 +345,24 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInputPathValid(const
// The specified path is empty
std::map<std::string, std::string> args_map;
if (file_path.empty()) {
ErrorManager::GetInstance().ATCReportErrMessage("E10004", {"parameter"}, {atc_param});
if (atc_param != "") {
ErrorManager::GetInstance().ATCReportErrMessage("E10004", {"parameter"}, {atc_param});
} else {
REPORT_INNER_ERROR("E19999", "Param file_path is empty, check invalid");
}
GELOGW("Input parameter %s is empty.", file_path.c_str());
return false;
}
std::string real_path = RealPath(file_path.c_str());
// Unable to get absolute path (does not exist or does not have permission to access)
if (real_path.empty()) {
ErrorManager::GetInstance().ATCReportErrMessage("E19000", {"path", "errmsg"}, {file_path, strerror(errno)});
if (atc_param != "") {
std::string reason = "realpath error, errmsg:" + std::string(strerror(errno));
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{atc_param, file_path, reason});
} else {
REPORT_INNER_ERROR("E19999", "Path[%s]'s realpath is empty, errmsg[%s]", file_path.c_str(), strerror(errno));
}
GELOGW("Path[%s]'s realpath is empty, errmsg[%s]", file_path.c_str(), strerror(errno));
return false;
}
@@ -397,13 +378,23 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInputPathValid(const

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
!ValidateStr(real_path, mode),
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{atc_param, real_path, kPathValidReason});
if (atc_param != "") {
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{atc_param, real_path, kPathValidReason});
} else {
REPORT_INNER_ERROR("E19999", "Path[%s] has invalid char, %s", file_path.c_str(), kPathValidReason);
}
return false, "Invalid value for %s[%s], %s.", atc_param.c_str(), real_path.c_str(), kPathValidReason);

// The absolute path points to a file that is not readable
if (mmAccess2(real_path.c_str(), M_R_OK) != EN_OK) {
ErrorManager::GetInstance().ATCReportErrMessage("E19003", {"file", "errmsg"}, {file_path.c_str(), strerror(errno)});
if (atc_param != "") {
std::string reason = "cat not access, errmsg:" + std::string(strerror(errno));
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{atc_param, file_path, reason});
} else {
REPORT_INNER_ERROR("E19999", "Path[%s] can't acccess, errmsg:%s", file_path.c_str(), strerror(errno));
}
GELOGW("Read file[%s] failed, errmsg[%s]", file_path.c_str(), strerror(errno));
return false;
}
@@ -415,14 +406,27 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckOutputPathValid(const
const std::string &atc_param) {
// The specified path is empty
if (file_path.empty()) {
if (atc_param != "") {
ErrorManager::GetInstance().ATCReportErrMessage("E10004", {"parameter"}, {atc_param});
} else {
REPORT_INNER_ERROR("E19999", "Param file_path is empty, check invalid");
}
ErrorManager::GetInstance().ATCReportErrMessage("E10004", {"parameter"}, {atc_param});
GELOGW("Input parameter's value is empty.");
return false;
}

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(strlen(file_path.c_str()) >= MMPA_MAX_PATH,
ErrorManager::GetInstance().ATCReportErrMessage(
"E19002", {"filepath", "size"}, {file_path, std::to_string(MMPA_MAX_PATH)});
if (atc_param != "") {
std::string reason = "len is too long, it must be less than " +
std::to_string(MMPA_MAX_PATH);
ErrorManager::GetInstance().ATCReportErrMessage(
"E10001", {"parameter", "value", "reason"},
{atc_param, file_path, reason});
} else {
REPORT_INNER_ERROR("E19999", "Path[%s] len is too long, it must be less than %d",
file_path.c_str(), MMPA_MAX_PATH);
}
return "", "Path[%s] len is too long, it must be less than %d", file_path.c_str(),
MMPA_MAX_PATH);

@@ -437,8 +441,12 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckOutputPathValid(const

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
!ValidateStr(file_path, mode),
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{atc_param, file_path, kPathValidReason});
if (atc_param != "") {
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{atc_param, file_path, kPathValidReason});
} else {
REPORT_INNER_ERROR("E19999", "Path[%s] has invalid char, %s", file_path.c_str(), kPathValidReason);
}
return false, "Invalid value for %s[%s], %s.", atc_param.c_str(), file_path.c_str(), kPathValidReason);

std::string real_path = RealPath(file_path.c_str());
@@ -446,7 +454,13 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckOutputPathValid(const
if (!real_path.empty()) {
// File is not readable or writable
if (mmAccess2(real_path.c_str(), M_W_OK | M_F_OK) != EN_OK) {
ErrorManager::GetInstance().ATCReportErrMessage("E19004", {"file", "errmsg"}, {real_path, strerror(errno)});
if (atc_param != "") {
std::string reason = "cat not access, errmsg:" + std::string(strerror(errno));
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{atc_param, file_path, reason});
} else {
REPORT_INNER_ERROR("E19999", "Path[%s] can't acccess, errmsg:%s", file_path.c_str(), strerror(errno));
}
GELOGW("Write file[%s] failed, errmsg[%s]", real_path.c_str(), strerror(errno));
return false;
}
@@ -465,7 +479,12 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckOutputPathValid(const
std::string prefix_path = std::string(file_path).substr(0, static_cast<size_t>(path_split_pos));
// Determine whether the specified path is valid by creating the path
if (CreateDirectory(prefix_path) != 0) {
ErrorManager::GetInstance().ATCReportErrMessage("E19006", {"path"}, {file_path});
if (atc_param != "") {
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{atc_param, file_path, "Can not create directory"});
} else {
REPORT_INNER_ERROR("E19999", "Path[%s] Can not create directory", file_path.c_str());
}
GELOGW("Can not create directory[%s].", file_path.c_str());
return false;
}


+1 -15  ge/engine_manager/dnnengine_manager.cc

@@ -45,7 +45,6 @@ const char *const kAttch = "attach";
const char *const kVectorCore = "VectorCore";
const char *const kVectorEngine = "VectorEngine";
const char *const kAIcoreEngine = "AIcoreEngine";
const char *const kCustomOpFlag = "_custom_op_flag";
const char *const kHostCpuEngineName = "DNN_VM_HOST_CPU";
const char *const kHostCpuOpKernelLibName = "DNN_VM_HOST_CPU_OP_STORE";
} // namespace
@@ -248,19 +247,6 @@ std::string DNNEngineManager::GetDNNEngineName(const ge::NodePtr &node_ptr) {
return it.engine;
} else {
checksupport_cost_[kernel_name] += GetCurrentTimestamp() - start_time;
bool is_custom_op = false;
if ((ge::AttrUtils::GetBool(op_desc, kCustomOpFlag, is_custom_op)) && is_custom_op) {
ErrorManager::GetInstance().ATCReportErrMessage("E13001", {"kernelname", "optype", "opname"},
{kernel_name, op_desc->GetType(), op_desc->GetName()});
GELOGE(FAILED,
"[Check][Param]The custom operator registered by the user does not support "
"the logic function delivered by this network, kernel_name %s, op type %s, "
"op name %s",
kernel_name.c_str(), op_desc->GetType().c_str(), op_desc->GetName().c_str());
std::string error_info = "The custom operator registered by the user does not support the logic function"
"delivered by this network";
return "";
}
unsupported_reasons.emplace(kernel_name, unsupported_reason);
GELOGI("DNNEngineManager:Check support failed, kernel_name is %s, op type is %s, op name is %s",
kernel_name.c_str(), op_desc->GetType().c_str(), op_desc->GetName().c_str());
@@ -283,7 +269,7 @@ std::string DNNEngineManager::GetDNNEngineName(const ge::NodePtr &node_ptr) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E13002", {"optype", "opskernel", "reason"}, {op_desc->GetType(), it.first, it.second});
GELOGE(GE_GRAPH_ASSIGN_ENGINE_FAILED, "[Check][OpSupported]Op type %s of ops kernel %s "
"is unsupported, reason %s",
"is unsupported, reason : %s",
op_desc->GetType().c_str(), it.first.c_str(), it.second.c_str());
}



+3 -5  ge/executor/ge_executor.cc

@@ -209,7 +209,7 @@ static void InitOpsProtoManager() {
string file_path = RealPath(path.c_str());
if (file_path.empty()) {
GELOGE(FAILED, "[Check][EnvPath]ASCEND_OPP_PATH path [%s] is invalid.", path.c_str());
REPORT_INPUT_ERROR("E68016", {"ASCEND_OPP_PATH", path});
REPORT_INPUT_ERROR("E68016", {"ASCEND_OPP_PATH", path});
return;
}
opsproto_path = (path + "/op_proto/custom/" + ":") + (path + "/op_proto/built-in/");
@@ -804,9 +804,8 @@ Status GeExecutor::LoadDataFromFile(const std::string &path, ModelData &model_da
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}
GELOGI("load modelData from file: %s.", path.c_str());
std::string key_path;
int32_t priority = 0;
Status ret = GraphLoader::LoadDataFromFile(path, key_path, priority, model_data);
Status ret = GraphLoader::LoadDataFromFile(path, priority, model_data);
if (ret != SUCCESS) {
if (model_data.model_data != nullptr) {
delete[] static_cast<char *>(model_data.model_data);
@@ -932,8 +931,7 @@ Status GeExecutor::GetMemAndWeightSize(const std::string &path, size_t &mem_size
}

ModelData model;
std::string key;
Status ret = ge::GraphLoader::LoadDataFromFile(path, key, 0, model);
Status ret = ge::GraphLoader::LoadDataFromFile(path, 0, model);
if ((ret != SUCCESS) || (model.model_data == nullptr)) {
GELOGE(ret, "Load data from file failed. ret = %d", ret);
return ret;


+9 -8  ge/graph/build/memory/graph_mem_assigner.cc

@@ -268,12 +268,13 @@ Status GraphMemoryAssigner::ReAssignMemory(bool is_loop_graph, map<uint64_t, siz
total_mem_offset, VarManager::Instance(session_id)->GetGraphMemoryMaxSize(),
compute_graph_->GetGraphID(), compute_graph_->GetName().c_str());
for (auto iter : mem_type_to_offset) {
ErrorManager::GetInstance().ATCReportErrMessage("E19022", {"memType", "size", "item", "maxsize"},
{std::to_string(iter.first), std::to_string(iter.second), "featuremap",
std::to_string(VarManager::Instance(session_id)->GetGraphMemoryMaxSize())});
GEEVENT("[IMAS]AfterAssignMemory : %s memoffset[%zu], memtype[%ld]", compute_graph_->GetName().c_str(),
iter.second, iter.first);
}
REPORT_INPUT_ERROR(
"E19022", std::vector<std::string>({"size", "item", "maxsize"}),
std::vector<std::string>({std::to_string(total_mem_offset), "featuremap",
std::to_string(VarManager::Instance(session_id)->GetGraphMemoryMaxSize())}));
return ge::FAILED;
}
return SUCCESS;
@@ -1838,17 +1839,17 @@ bool GraphMemoryAssigner::CheckContinuousMemType(vector<int64_t> mem_type_list)
int64_t mem_type_tmp = mem_type_list[0];
for (auto mem_type : mem_type_list) {
if (mem_type != mem_type_tmp) {
std::string error = "The memory is continuous, but the type of the input memory is inconsistent. They are " +
FmtToStr(mem_type_tmp) + " and " + FmtToStr(mem_type);
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error});
REPORT_INNER_ERROR(
"E19999",
"The memory is continuous, but the type of the input memory is inconsistent. They are %s and %s",
FmtToStr(mem_type_tmp).c_str(), FmtToStr(mem_type).c_str());
GELOGW("The memory is continuous, but the type of the input memory is inconsistent. They are [%ld] and [%ld].",
mem_type_tmp, mem_type);
return false;
}
}
if (memory_offset_.find(mem_type_tmp) == memory_offset_.end()) {
std::string error = "Memory offset map does not have memory type" + FmtToStr(mem_type_tmp);
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error});
REPORT_INNER_ERROR("E19999", "Memory offset map does not have memory type %s", FmtToStr(mem_type_tmp).c_str());
GELOGW("Memory offset map does not have memory type[%ld].", mem_type_tmp);
return false;
}


+0 -15  ge/graph/common/omg_util.cc

@@ -274,21 +274,6 @@ bool IsUnknownShapeTensor(const GeTensorDesc &tensor_desc) {
return false;
}

///
/// @brief Set Op _force_unknown_shape flag
/// @param [in] node
/// @param [in] force_unknown, set attribute if true
/// @param [in] group_index, condition group index of node.
/// @return
///
void MarkForceUnknownShape(const NodePtr &node, bool force_unknown, int64_t group_index) {
if (!force_unknown) {
return;
}

SetControlFlowGroup(node, group_index);
}

///
/// @brief Set Op _control_flow_group flag
/// @param [in] node


+0 -9  ge/graph/common/omg_util.h

@@ -125,15 +125,6 @@ Status GetMemorySize(const NodePtr &node, int64_t &output_size);
///
bool IsUnknownShapeTensor(const GeTensorDesc &tensor_desc);

///
/// @brief Set Op _force_unknown_shape flag
/// @param [in] node
/// @param [in] force_unknown, set attribute if true
/// @param [in] group_index, condition group index of node.
/// @return
///
void MarkForceUnknownShape(const NodePtr &node, bool force_unknown, int64_t group_index);

///
/// @brief Set Op _control_flow_group flag
/// @param [in] node


+4 -10  ge/graph/load/graph_loader.cc

@@ -123,23 +123,17 @@ Status GraphLoader::GetMaxUsedMemory(uint32_t model_id, uint64_t &max_size) {
return SUCCESS;
}

Status GraphLoader::LoadDataFromFile(const std::string &path, const std::string &key_path, int32_t priority,
ModelData &model_data) {
if (!CheckInputPathValid(path)) {
Status GraphLoader::LoadDataFromFile(const std::string &path, int32_t priority, ModelData &model_data) {
if (!CheckInputPathValid(path, "model_file")) {
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "[Check][Param] model path is invalid:%s", path.c_str());
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}

GELOGI("Load model begin, model path is: %s", path.c_str());
if (!key_path.empty() && !CheckInputPathValid(key_path)) {
REPORT_INNER_ERROR("E19999", "Param key_path:%s empty or invalid", key_path.c_str());
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Param] decrypt_key path is invalid:%s", key_path.c_str());
return ACL_ERROR_GE_PARAM_INVALID;
}

Status ret = ModelParserBase::LoadFromFile(path.c_str(), key_path.c_str(), priority, model_data);
Status ret = ModelParserBase::LoadFromFile(path.c_str(), priority, model_data);
if (ret != SUCCESS) {
GELOGE(ret, "[Call][LoadFromFile] failed. ret = %u, path:%s, key path:%s", ret, path.c_str(), key_path.c_str());
GELOGE(ret, "[Call][LoadFromFile] failed. ret = %u, path:%s", ret, path.c_str());
if (model_data.model_data != nullptr) {
delete[] static_cast<char *>(model_data.model_data);
model_data.model_data = nullptr;


+1 -2  ge/graph/load/graph_loader.h

@@ -48,8 +48,7 @@ class GraphLoader {

static Status GetMemoryInfo(int64_t &free);

static Status LoadDataFromFile(const std::string &path, const std::string &key_path, int32_t priority,
ModelData &model_data);
static Status LoadDataFromFile(const std::string &path, int32_t priority, ModelData &model_data);

static Status LoadModelFromData(uint32_t &model_id, const ModelData &model_data, void *dev_ptr, size_t mem_size,
void *weight_ptr, size_t weight_size);


+17 -15  ge/graph/load/model_manager/davinci_model.cc

@@ -3436,37 +3436,39 @@ void DavinciModel::SetZeroCopyAddr(const OpDescPtr &op_desc, const std::vector<v
/// @param [in] is_dynamic: dynamic batch input flag.
/// @return true if success
///
bool DavinciModel::CheckInputAndModelSize(const int64_t &input_size, const int64_t &op_size, bool is_dynamic) {
bool DavinciModel::CheckUserAndModelSize(const int64_t &size, const int64_t &op_size,
bool is_input, bool is_dynamic) {
const std::string input_or_output = is_input ? "input" : "output";
if (is_dynamic) { // dynamic is max size.
GELOGI("No need to check input and model size.");
GELOGI("No need to check user %s and model size.", input_or_output.c_str());
return true;
}

if (input_size > op_size) {
if (size > op_size) {
GELOGW(
"Input size [%ld] is bigger than om size need [%ld], "
"User %s size [%ld] is bigger than om size need [%ld], "
"MAY cause inference result ERROR, please check model input",
input_size, op_size);
input_or_output.c_str(), size, op_size);
}

if (is_dynamic_aipp_) {
GELOGI("This is dynamic aipp model, no need to judge smaller input size");
GELOGI("This is dynamic aipp model, no need to judge smaller user size");
return true;
}
// Judge overflow first
if (input_size > (INT64_MAX - kDataMemAlignSizeCompare)) {
GELOGI("The Input size [%ld] is smaller than model size [%ld] and is in the range of 64 bytes", input_size,
op_size);
if (size > (INT64_MAX - kDataMemAlignSizeCompare)) {
GELOGI("The user %s size [%ld] is smaller than model size [%ld] and is in the range of 64 bytes",
input_or_output.c_str(), size, op_size);
return true;
}
// The input and model input size can not be exactly equal because user input is not definite.
if ((input_size + kDataMemAlignSizeCompare) < op_size) {
REPORT_INNER_ERROR("E19999", "input size:%ld from user add align:%u > input_op_size:%ld in model, model_id:%u, "
if ((size + kDataMemAlignSizeCompare) < op_size) {
REPORT_INNER_ERROR("E19999", "%s size:%ld from user add align:%u < input_op_size:%ld in model, model_id:%u, "
"check invalid",
input_size, kDataMemAlignSizeCompare, op_size, model_id_);
input_or_output.c_str(), size, kDataMemAlignSizeCompare, op_size, model_id_);
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"[Check][Param] input size:%ld from user add align:%u > input_op_size:%ld in model, model_id:%u",
input_size, kDataMemAlignSizeCompare, op_size, model_id_);
"[Check][Param] %s size:%ld from user add align:%u < input_op_size:%ld in model, model_id:%u",
input_or_output.c_str(), size, kDataMemAlignSizeCompare, op_size, model_id_);
return false;
}
return true;
@@ -3544,7 +3546,7 @@ Status DavinciModel::UpdateIoTaskArgs(const std::map<uint32_t, ZeroCopyOffset> &
return ACL_ERROR_GE_PARAM_INVALID;
}

if (!CheckInputAndModelSize(buffer.length, data.second.GetDataSize(), is_dynamic)) {
if (!CheckUserAndModelSize(buffer.length, data.second.GetDataSize(), is_input, is_dynamic)) {
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Call][CheckInputAndModelSize] failed, op[%s]",
data.second.GetOpName().c_str());
return ACL_ERROR_GE_PARAM_INVALID;

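As an aside on the size check retained by the renamed CheckUserAndModelSize: a user buffer may exceed the model's expected size (only a warning) but may undershoot it by no more than the alignment slack. A self-contained sketch of just that comparison follows; the 64-byte value of kDataMemAlignSizeCompare is an assumption inferred from the "range of 64 bytes" log text above, not something stated by this diff:

#include <cstdint>

// Assumed value; the real kDataMemAlignSizeCompare is defined elsewhere in davinci_model.cc.
constexpr int64_t kDataMemAlignSizeCompare = 64;

// True when a user-supplied buffer of user_size bytes is not rejected for an
// op expecting op_size bytes: larger buffers only trigger a warning, smaller
// ones are tolerated while (user_size + kDataMemAlignSizeCompare) >= op_size.
bool UserSizeAcceptable(int64_t user_size, int64_t op_size) {
  return (user_size + kDataMemAlignSizeCompare) >= op_size;
}

// UserSizeAcceptable(960, 1024) -> true  (960 + 64 == 1024)
// UserSizeAcceptable(900, 1024) -> false (rejected: more than 64 bytes short)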

+1 -1  ge/graph/load/model_manager/davinci_model.h

@@ -611,7 +611,7 @@ class DavinciModel {
/// @param [in] is_dynamic: dynamic batch input flag.
/// @return true if success
///
bool CheckInputAndModelSize(const int64_t &input_size, const int64_t &op_size, bool is_dynamic);
bool CheckUserAndModelSize(const int64_t &size, const int64_t &op_size, bool is_input, bool is_dynamic);

///
/// @ingroup ge


+11 -24  ge/graph/manager/graph_manager.cc

@@ -120,7 +120,6 @@ const char *const kCheckPointForGetVar = "CheckPointGraphForGetVar";
const char *const kCheckPointGraph = "checkpoint_graph";
const char *const kVectorEngine = "VectorEngine";
const char *const kAIcoreEngine = "AIcoreEngine";
const char *const kRunFlagOffline = "0";
const int32_t kDynamicDimsTypeIsGetNext = 0;
const int32_t kDynamicDimsTypeIsData = 1;
const char *const kGetNextName = "IteratorV2";
@@ -1789,8 +1788,7 @@ Status GraphManager::ParseOptions(const std::map<std::string, std::string> &opti
return GE_GRAPH_OPTIONS_INVALID);

// ge.graphType
ret =
ParseTrainGraphFlag(options_.run_graph_flag, options_.train_graph_flag);
ret = ParseTrainGraphFlag(options_.run_graph_flag, options_.train_graph_flag);
GE_IF_BOOL_EXEC(ret != SUCCESS,
GELOGE(GE_GRAPH_OPTIONS_INVALID, "[Parse][TrainGraphFlag] Key:ge.runFlag value is invalid");
return GE_GRAPH_OPTIONS_INVALID);
@@ -2436,6 +2434,8 @@ Status GraphManager::RemoveIsolatedConstInThisGraph(ge::ComputeGraphPtr &compute
continue;
}
if (n->GetOpDesc()->GetType() == CONSTANT || n->GetOpDesc()->GetType() == CONSTANTOP) {
// reset const type depend on train_flag
options_.train_graph_flag ? n->GetOpDesc()->SetType(CONSTANTOP) : n->GetOpDesc()->SetType(CONSTANT);
if (n->GetOutAllNodes().empty() && n->GetInAllNodes().empty()) {
// it is an isolated constant, just remove it
if (GraphUtils::RemoveJustNode(compute_graph, n) != GRAPH_SUCCESS) {
@@ -2762,35 +2762,22 @@ Status GraphManager::OptimizeStage2(ge::ComputeGraphPtr &compute_graph) {
"Please pay attention to it.");
}

GE_CHK_STATUS_RET(ChangeConstType(compute_graph));
ChangeConstTypeWhenTraining(compute_graph);

GELOGI("End optimize after merge sub graph.");
return SUCCESS;
}

Status GraphManager::ChangeConstType(const ComputeGraphPtr &compute_graph) {
// run_flag off means offline, on means online
string run_flag;
(void)ge::GetContext().GetOption(ge::RUN_FLAG, run_flag);
// The constant for online is CONSTANTOP, and is CONSTANT for offline. They will be unified in future.
if (run_flag == kRunFlagOffline) {
GELOGI("Offline mode, change all Constant to Const.");
} else {
GELOGI("Online mode, change all Const to Constant.");
}
for (NodePtr &n : compute_graph->GetAllNodes()) {
GE_CHECK_NOTNULL(n);
if (n->GetType() == CONSTANT || n->GetType() == CONSTANTOP) {
auto op_desc = n->GetOpDesc();
GE_CHECK_NOTNULL(op_desc);
if (run_flag == kRunFlagOffline) {
op_desc->SetType(CONSTANT);
} else {
op_desc->SetType(CONSTANTOP);
void GraphManager::ChangeConstTypeWhenTraining(const ComputeGraphPtr &compute_graph) {
// The constant for train is CONSTANTOP, and is CONSTANT for inference. They will be unified in future.
if (options_.train_graph_flag) {
for (NodePtr &n : compute_graph->GetAllNodes()) {
// This can ensure that n is not a null pointer
if (n->GetOpDesc()->GetType() == CONSTANT) {
n->GetOpDesc()->SetType(CONSTANTOP);
}
}
}
return SUCCESS;
}

Status GraphManager::LoadGraphAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {


+1 -1  ge/graph/manager/graph_manager.h

@@ -375,7 +375,7 @@ class GraphManager {
static void ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node, RunAsyncCallback callback,
Status ret, const string &log);

Status ChangeConstType(const ComputeGraphPtr &compute_graph);
void ChangeConstTypeWhenTraining(const ComputeGraphPtr &compute_graph);

Status PreRunOptimizeOriginalGraph(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs,
ge::ComputeGraphPtr &compute_graph, uint64_t session_id);


+0 -2  ge/graph/optimize/graph_optimize.cc

@@ -336,10 +336,8 @@ Status GraphOptimize::OptimizeAfterStage1(ComputeGraphPtr &compute_graph) {
GELOGI("[OptimizeAfterStage1]: engine type will exclude:%s.", exclude_core_type.c_str());
continue;
}
#ifndef ONLY_COMPILE_OPEN_SRC
GELOGI("Begin to optimize graph after stage1 by engine %s.", iter->first.c_str());
ret = (iter->second)->OptimizeAfterStage1(*compute_graph);
#endif
if (ret != SUCCESS) {
REPORT_INNER_ERROR("E19999", "Call OptimizeAfterStage1 failed, ret:%d, engine_name:%s, "
"graph_name:%s.", ret, iter->first.c_str(), compute_graph->GetName().c_str());


+22 -1  ge/graph/partition/dynamic_shape_partition.cc

@@ -364,6 +364,7 @@ static std::string ToString(const std::vector<ClusterPtr> &clusters) {
}

void DynamicShapePartitioner::MergeClustersControlFlow() {
std::unordered_set<ClusterPtr> all_merged_clusters;
for (const auto &item : control_clusters_) {
const auto &control_cluster = item.second;
auto rit = control_cluster.rbegin();
@@ -373,17 +374,32 @@ void DynamicShapePartitioner::MergeClustersControlFlow() {
}

const auto &cluster = *rit;
if (all_merged_clusters.count(cluster) > 0) {
continue;
}

bool is_unknown_cluster = cluster->IsUnknownShape();
for (++rit; rit != control_cluster.rend(); ++rit) {
const auto &cluster_from = *rit;
if (all_merged_clusters.count(cluster_from) > 0) {
continue;
}

auto merged_clusters = cluster->MergeAllPathFrom(cluster_from);
GELOGD("Merge all path cluster from %lu to %lu %s.", cluster_from->Id(), cluster->Id(),
ToString(merged_clusters).c_str());
for (const auto &merged_cluster : merged_clusters) {
all_merged_clusters.emplace(merged_cluster);
for (const auto &node : merged_cluster->Nodes()) {
node_2_cluster_[node] = cluster;
}
}
}

if (!is_unknown_cluster && cluster->IsUnknownShape()) {
GELOGD("Add to ordered cluster: %s", cluster->DebugString().c_str());
ordered_cluster_.push_back(cluster);
}
}
}

@@ -703,7 +719,12 @@ void Cluster::Merge(ClusterPtr other) {
if (other->min_ < min_) {
min_ = other->min_;
}
};

if (!IsUnknownShape() && other->IsUnknownShape()) {
type_ = UNKNOWN_SHAPE;
}
}

bool Cluster::TryMerge(ClusterPtr other) {
std::queue<ClusterPtr> forward_reached;
forward_reached.push(other);


+1 -1  ge/graph/partition/dynamic_shape_partition.h

@@ -161,7 +161,7 @@ class DynamicShapePartitioner {
ge::ComputeGraphPtr root_graph_; // The original graph to partition
std::unordered_map<NodePtr, std::shared_ptr<Cluster>> node_2_cluster_; // Record nodes and the cluster it belongs to
// V1 control flow cluster, need merge to one Graph.
std::unordered_map<int64_t, std::vector<std::shared_ptr<Cluster>>> control_clusters_;
std::map<int64_t, std::vector<std::shared_ptr<Cluster>>> control_clusters_;
// topological sorted clusters, this field will change with the splitting.
// When partitioning UNKNOWN_SHAPE cluster, it is a collection of all topological sorted UNKNOWN_SHAPE clusters
// When partitioning KNOWN_SHAPE cluster, it is a collection of all topological sorted KNOWN_SHAPE clusters


+8 -30  ge/graph/passes/mark_force_unknown_for_cond_pass.cc

@@ -132,39 +132,17 @@ void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const NodePtr &node, std:
/// @return
///
void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const std::map<NodePtr, std::vector<NodePtr>> &switch_groups) {
std::function<bool(const NodePtr &)> callback = [](const NodePtr &n) {
return n->GetOpDesc()->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP);
};

for (auto it1 = switch_groups.begin(); it1 != switch_groups.end(); ++it1) {
const auto &op_node1 = it1->first;
const auto &op_desc1 = op_node1->GetOpDesc();
if (op_desc1->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP)) {
for (auto it = switch_groups.begin(); it != switch_groups.end(); ++it) {
const auto &op_node = it->first;
const auto &op_desc = op_node->GetOpDesc();
if (op_desc->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP)) {
continue;
}

if (IsUnknownShapeTensor(op_desc1->GetOutputDesc(0))) {
int64_t group_index = op_desc1->GetId();
GELOGI("Mark %s as unknown shape control flow, group index: %ld", op_desc1->GetName().c_str(), group_index);
MarkForceUnknownShape(op_node1, true, group_index);
for (const auto &n : it1->second) {
MarkForceUnknownShape(n, true, group_index);
}

for (auto it2 = switch_groups.begin(); it2 != switch_groups.end(); ++it2) {
const auto &op_node2 = it2->first;
const auto &op_desc2 = op_node2->GetOpDesc();
if (op_desc2->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP)) {
continue;
}

if (std::any_of(it2->second.begin(), it2->second.end(), callback)) {
MarkForceUnknownShape(op_node2, true, group_index);
for (const auto &n : it2->second) {
MarkForceUnknownShape(n, true, group_index);
}
}
}
int64_t group_index = op_desc->GetId();
SetControlFlowGroup(op_node, group_index);
for (const auto &n : it->second) {
SetControlFlowGroup(n, group_index);
}
}
}


+6 -0  ge/graph/passes/mark_graph_unknown_status_pass.cc

@@ -40,6 +40,12 @@ Status MarkGraphUnknownStatusPass::Run(ComputeGraphPtr graph) {
}
}

const auto &node = graph->GetParentNode();
if (!is_unknown_shape && node != nullptr && node->GetType() == PARTITIONEDCALL) {
GE_CHK_GRAPH_STATUS_RET(NodeUtils::GetNodeUnknownShapeStatus(*node, is_unknown_shape),
"[Get][ShapeStatus] of node[%s] failed!", node->GetName().c_str());
}

for (const auto &node : graph->GetDirectNode()) {
GELOGD("Set OwnerGraphIsUnknown attr to node[%s]", node->GetName().c_str());
(void)AttrUtils::SetBool(node->GetOpDesc(), kOwnerGraphIsUnknown, is_unknown_shape);


+2 -3  ge/graph/passes/merge_to_stream_merge_pass.cc

@@ -89,8 +89,7 @@ Status MergeToStreamMergePass::AddActiveNodes(const ComputeGraphPtr &graph, cons
REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid");
return FAILED, "[Check][Param] Param of pre node is nullptr.");
int64_t group_index = -1;
bool force_unknown = AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index);
MarkForceUnknownShape(node, force_unknown, group_index);
(void)AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index);
for (const InDataAnchorPtr &in_data_anchor : node->GetAllInDataAnchors()) {
OutDataAnchorPtr peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
@@ -109,7 +108,7 @@ Status MergeToStreamMergePass::AddActiveNodes(const ComputeGraphPtr &graph, cons
GELOGE(FAILED, "[Set][ActiveLabelList] for node %s failed.", active_node->GetName().c_str());
return FAILED;
}
MarkForceUnknownShape(active_node, force_unknown, group_index);
SetControlFlowGroup(active_node, group_index);
}

return SUCCESS;


+2 -2  ge/graph/passes/multi_batch_clone_pass.cc

@@ -220,8 +220,8 @@ Status MultiBatchClonePass::CheckAndParseDynamicData() {
GELOGE(PARAM_INVALID, "[Check][DynamicImageSizeShape] of %s failed.", data_name.c_str());
return PARAM_INVALID);
} else if (!GetLocalOmgContext().dynamic_dims.empty()) {
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "reason"},
{"--input_shape", "all dynamic data must be set in --input_shape"});
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{"--dynamic_dims", data_name, "all dynamic node must be set in --input_shape, please check"});
GELOGE(INTERNAL_ERROR, "[Check][Param] data:%s shape:%s must be set int --input_shape",
node->GetName().c_str(), data_shape.ToString().c_str());
return INTERNAL_ERROR;


+9 -1  ge/graph/passes/next_iteration_pass.cc

@@ -284,13 +284,21 @@ Status NextIterationPass::HandleWhileGroup(ComputeGraphPtr &graph) {
/// @return void
///
void NextIterationPass::HandleSwitchExitNodes(const LoopCondGroup &loop_group, int64_t group_index) {
std::string node_type;
for (const auto &switch_node : loop_group.switch_nodes) {
SetControlFlowGroup(switch_node, group_index);
for (const auto &node : switch_node->GetOutDataNodes()) {
std::string node_type;
(void)GetOriginalType(node, node_type);
if (kExitOpTypes.count(node_type) > 0) {
SetControlFlowGroup(node, group_index);
} else {
// For: Switch -> Cast -> Exit
for (const auto &n : node->GetOutDataNodes()) {
(void)GetOriginalType(n, node_type);
if (kExitOpTypes.count(node_type) > 0) {
SetControlFlowGroup(n, group_index);
}
}
}
}
}


+8 -8  ge/graph/passes/switch_to_stream_switch_pass.cc

@@ -395,8 +395,8 @@ NodePtr SwitchToStreamSwitchPass::CreateStreamSwitchNode(const ComputeGraphPtr &
peer_cond_anchor->GetOwnerNode()->GetName().c_str(), stream_switch->GetName().c_str());

int64_t group_index = -1;
bool force_unknown = AttrUtils::GetInt(switch_node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index);
MarkForceUnknownShape(stream_switch, force_unknown, group_index);
(void)AttrUtils::GetInt(switch_node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index);
SetControlFlowGroup(stream_switch, group_index);
return stream_switch;
}

@@ -491,8 +491,8 @@ int64_t SwitchToStreamSwitchPass::GetGroupId(const NodePtr &node) {
Status SwitchToStreamSwitchPass::CombineSwitchNode(const ComputeGraphPtr &graph) {
for (auto iter = cond_node_map_.begin(); iter != cond_node_map_.end(); ++iter) {
for (auto group_iter = iter->second.begin(); group_iter != iter->second.end(); ++group_iter) {
std::list<NodePtr> false_switch_list = group_iter->second[SWITCH_FALSE_OUTPUT];
std::list<NodePtr> true_switch_list = group_iter->second[SWITCH_TRUE_OUTPUT];
const std::list<NodePtr> &false_switch_list = group_iter->second[SWITCH_FALSE_OUTPUT];
const std::list<NodePtr> &true_switch_list = group_iter->second[SWITCH_TRUE_OUTPUT];
std::set<NodePtr> same_cond_switch;
same_cond_switch.insert(false_switch_list.begin(), false_switch_list.end());
same_cond_switch.insert(true_switch_list.begin(), true_switch_list.end());
@@ -524,13 +524,13 @@ Status SwitchToStreamSwitchPass::CombineSwitchNode(const ComputeGraphPtr &graph)
std::function<bool(const NodePtr &)> callback = [&group_index](const NodePtr &n) {
return AttrUtils::GetInt(n->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index);
};
bool is_unknown_shape = std::any_of(same_cond_switch.begin(), same_cond_switch.end(), callback);
MarkForceUnknownShape(active_node, is_unknown_shape, group_index);
(void)std::any_of(same_cond_switch.begin(), same_cond_switch.end(), callback);
SetControlFlowGroup(active_node, group_index);

const std::string &cond_group = cond_node->GetName();
for (uint32_t i = 0; i < SWITCH_OUTPUT_NUM; ++i) {
bool true_branch_flag = (i == SWITCH_TRUE_OUTPUT);
std::list<NodePtr> &switch_list = (true_branch_flag ? true_switch_list : false_switch_list);
const std::list<NodePtr> &switch_list = (true_branch_flag ? true_switch_list : false_switch_list);
GE_IF_BOOL_EXEC(switch_list.empty(), continue);

// select first stream_switch
@@ -559,7 +559,7 @@ Status SwitchToStreamSwitchPass::CombineSwitchNode(const ComputeGraphPtr &graph)
"[Add][Edge] between %s and %s failed.",
cast_node->GetName().c_str(), stream_switch->GetName().c_str());

MarkForceUnknownShape(stream_switch, is_unknown_shape, group_index);
SetControlFlowGroup(stream_switch, group_index);
for (const NodePtr &node : switch_list) {
GE_IF_BOOL_EXEC(node != stream_switch, {
GE_CHK_STATUS(GraphUtils::RemoveEdge(peer_cond_anchor, node->GetInDataAnchor(0)),


+52 -52  ge/graph/preprocess/graph_preprocess.cc

@@ -195,9 +195,8 @@ NodePtr CreateTransNode(const std::string &name, const std::string &node_type, c

auto index = TransOpUtil::GetTransOpDataIndex(node_type);
if (index < 0) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E19025", {"situation", "reason"},
{"The trans node type[" + node_type + "]", "it must be " + TransOpUtil::TransopMapToString()});
REPORT_INNER_ERROR("E19999", "The trans node type %s does not exists, it must be %s",
node_type.c_str(), TransOpUtil::TransopMapToString().c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] The trans node type %s does not exists", node_type.c_str());
return nullptr;
}
@@ -421,8 +420,8 @@ Status RecoverTransRoadForVar(const NodePtr &var, const VarTransRoad &road) {
auto trans_name = var->GetName() + "_trans_" + std::to_string(index++);
auto ret = RecoverOneTransNodeForVar(trans_name, *iter, last_node, last_node);
if (ret != SUCCESS) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E15001", {"variable", "index", "type"}, {var->GetName(), std::to_string(index), iter->node_type});
REPORT_CALL_ERROR("E19999", "Failed to recover trans node for variable %s, index %d, type %s",
var->GetName().c_str(), index, iter->node_type.c_str());
GELOGE(INTERNAL_ERROR, "[Recover][TransNode] for variable %s, index %d, type %s", var->GetName().c_str(),
index, iter->node_type.c_str());
return INTERNAL_ERROR;
@@ -467,8 +466,8 @@ Status RecoverTransRoadForVarRef(const std::set<NodePtr> &nodes, const VarTransR
auto trans_name = var->GetName() + "_trans_" + std::to_string(index++);
auto ret = RecoverOneTransNodeForVarRef(trans_name, *iter, last_node, last_node);
if (ret != SUCCESS) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E15001", {"variable", "index", "type"}, {var->GetName(), std::to_string(index), iter->node_type});
REPORT_CALL_ERROR("E19999", "Failed to recover trans node for variable %s, index %d, type %s",
var->GetName().c_str(), index, iter->node_type.c_str());
GELOGE(INTERNAL_ERROR, "[Recover][TransNode] for variable %s failed, index %d, type %s",
var->GetName().c_str(), index, iter->node_type.c_str());
return INTERNAL_ERROR;
@@ -643,8 +642,8 @@ Status CheckIfDynamicBatchScene(NodePtr &data_node, bool &is_dynamic_batch, Node
std::string related_node_name;
if (AttrUtils::GetStr(data_node->GetOpDesc(), kMbatchSwitchnName, related_node_name)) {
if (related_node_name.empty()) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E15002", {"opname", "value", "reason"}, {data_node->GetName(), "flag", "but the value is empty"});
REPORT_INNER_ERROR("E19999", "The data node %s has switchn node flag, but the value is empty",
data_node->GetName().c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] The data node %s has switchn node flag, but the value is empty",
data_node->GetName().c_str());
return INTERNAL_ERROR;
@@ -660,9 +659,8 @@ Status CheckIfDynamicBatchScene(NodePtr &data_node, bool &is_dynamic_batch, Node
}

if (mbatch_node == nullptr) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E15002", {"opname", "value", "reason"},
{data_node->GetName(), related_node_name, "but can not find it on the graph"});
REPORT_INNER_ERROR("E19999", "The data node %s has switchn node %s, but can not find it on the graph",
data_node->GetName().c_str(), related_node_name.c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] The data node %s has switchn node %s, but can not find it on the graph",
data_node->GetName().c_str(), related_node_name.c_str());
return INTERNAL_ERROR;
@@ -836,10 +834,10 @@ Status ProcessInputNC1HWC0DynShape(NodePtr &node_ptr, bool &is_dynamic_batch, No
ge::GeShape old_shape = input->GetShape();
bool support = ((old_format == FORMAT_NC1HWC0) || (old_format == FORMAT_NCHW) || (old_format == FORMAT_NHWC));
if (!support) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E19014", {"opname", "value", "reason"},
{op_desc->GetName(), "format[" + TypeUtils::FormatToSerialString(old_format) + "]",
"only support FORMAT_NC1HWC0,FORMAT_NCHW,FORMAT_NHWC"});
REPORT_INNER_ERROR("E19999",
"The format:%s of op:%s(%s) is unsupported, only support FORMAT_NC1HWC0,FORMAT_NCHW,FORMAT_NHWC",
TypeUtils::FormatToSerialString(old_format).c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] The format [%s] is unsupported, op:%s",
TypeUtils::FormatToSerialString(old_format).c_str(), op_desc->GetName().c_str());
return FAILED;
@@ -1086,10 +1084,9 @@ Status ProcessNetoutputNodeDynShape(NodePtr &node) {
// check if is_output_adjust_hw_layout is set
if (NeedUpdateFormatByOutputTypeParm(op_desc, index)) {
if ((old_format != FORMAT_NCHW) && (old_format != FORMAT_NHWC) && (old_format != FORMAT_NC1HWC0)) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E19014", {"opname", "value", "reason"},
{op_desc->GetName(), "format[" + TypeUtils::FormatToSerialString(old_format) + "]",
"only support FORMAT_NC1HWC0,FORMAT_NCHW,FORMAT_NHWC"});
REPORT_INNER_ERROR("E19999", "Format:%s of op:%s(%s) is not one of NCHW, NHWC, NC1HWC0.",
TypeUtils::FormatToSerialString(old_format).c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] Format is not one of NCHW, NHWC, NC1HWC0.");
return FAILED;
}
@@ -1329,9 +1326,9 @@ Status GraphPrepare::CheckRefInputNode(const NodePtr &node, const std::string &i
}
bool is_acceptable = (acceptable_types.find(input_type) != acceptable_types.end());
if (!is_acceptable) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E15005", {"opname", "optype", "opname1", "optype1"},
{op_desc->GetName(), node->GetType(), input_op_desc->GetName(), input_op_desc->GetType()});
REPORT_INNER_ERROR("E19999", "The ref input of ref node %s[%s] must be ref node or variable, but %s[%s]isn't.",
node->GetName().c_str(), node->GetType().c_str(), input_op_desc->GetName().c_str(),
input_op_desc->GetType().c_str());
GELOGE(PARAM_INVALID, "[Check][Param] The ref input of ref node %s[%s] must be ref node or variable, "
"but %s[%s]isn't.", node->GetName().c_str(), node->GetType().c_str(), input_op_desc->GetName().c_str(),
input_op_desc->GetType().c_str());
@@ -1406,8 +1403,8 @@ Status GraphPrepare::AdjustDataOpOutput(const NodePtr &node) {
int64_t tensor_size = 0;
graphStatus graph_status = TensorUtils::GetTensorMemorySizeInBytes(output, tensor_size);
if (graph_status != GRAPH_SUCCESS) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E19012", {"function", "reason"}, {"GetTensorMemorySizeInBytes", "opname is " + node->GetName()});
REPORT_CALL_ERROR("E19999", "GetTensorMemorySize by ouput index:0 of op:%s(%s) failed",
op_desc_ptr->GetName().c_str(), op_desc_ptr->GetType().c_str());
GELOGE(graph_status, "[Call][GetTensorMemorySizeInBytes] failed, op:%s", node->GetName().c_str());
return FAILED;
}
@@ -1430,10 +1427,10 @@ Status GraphPrepare::CheckInternalFormat(const NodePtr &input_node, const GeTens
if (need_check_internal_format) {
bool is_internal = TypeUtils::IsInternalFormat(format) || TypeUtils::IsInternalFormat(origin_format);
if (is_internal) {
ErrorManager::GetInstance().ATCReportErrMessage("E19025", {"situation", "reason"}, {"Input format[" +
TypeUtils::FormatToSerialString(format) + "] or origin_format[" +
TypeUtils::FormatToSerialString(origin_format) + "]",
"it is not support"});
std::string reason = "Input format[" + TypeUtils::FormatToSerialString(format) + "] or origin_format[" +
TypeUtils::FormatToSerialString(origin_format) + "] of op:" + input_node->GetName() +
" is not support";
REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
GELOGE(PARAM_INVALID, "[Check][Param] Input format %s or origin_format %s is not support.",
TypeUtils::FormatToSerialString(format).c_str(), TypeUtils::FormatToSerialString(origin_format).c_str());
return FAILED;
@@ -1461,9 +1458,9 @@ Status GraphPrepare::UpdateInput(const std::vector<GeTensor> &user_input,
}

if ((index < 0) || (static_cast<size_t>(index) >= user_input.size())) {
std::string situation = "data op index[" + std::to_string(index) + "]";
std::string reason = "it must less than user_input size[" + std::to_string(user_input.size()) + "]";
ErrorManager::GetInstance().ATCReportErrMessage("E19025", {"situation", "reason"}, {situation, reason});
std::string reason = "exist data op:" + input_node->GetName() + " index " + std::to_string(index) +
" bigger than input tensor size[" + std::to_string(user_input.size()) + "], check invalid";
REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
GELOGE(PARAM_INVALID, "[Check][Param] user_input size = %zu, graph data op index = %ld.",
user_input.size(), index);
return FAILED;
@@ -1484,8 +1481,9 @@ Status GraphPrepare::UpdateInput(const std::vector<GeTensor> &user_input,
uint32_t length = 1;
bool type_ret = TypeUtils::GetDataTypeLength(data_type, length);
if (!type_ret) {
ErrorManager::GetInstance().ATCReportErrMessage("E19025", {"situation", "reason"},
{"Input datatype[" + TypeUtils::DataTypeToSerialString(data_type) + "]", "it is not support"});
std::string reason = "Input datatype[" + TypeUtils::DataTypeToSerialString(data_type) + "] of index:" +
std::to_string(index) + " input tensor is not support";
REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
GELOGE(PARAM_INVALID, "[Check][Param] Input datatype %s is not support.",
TypeUtils::DataTypeToSerialString(data_type).c_str());
return FAILED;
@@ -1501,10 +1499,9 @@ Status GraphPrepare::UpdateInput(const std::vector<GeTensor> &user_input,
return FAILED);
bool size_check = (size != 0 && shape_size != size);
if (size_check) {
std::string situation = "input data size[" + std::to_string(size) +
"] and shape_size[" + std::to_string(size) + "]";
std::string reason = "because size != 0 and shape_size != size";
ErrorManager::GetInstance().ATCReportErrMessage("E19025", {"situation", "reason"}, {situation, reason});
std::string reason = "input tensor[index:" + std::to_string(index) + "]'s data size[" + std::to_string(size) +
"] != shape_size[" + std::to_string(size) + "], check invalid";
REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
GELOGE(PARAM_INVALID, "[Check][Param] input data size = %ld, shape_size = %ld.", size, shape_size);
return FAILED;
}
@@ -1884,8 +1881,8 @@ Status GraphPrepare::VerifyConstOp(const NodePtr &node) {
uint32_t length = 1;
bool type_ret = TypeUtils::GetDataTypeLength(data_type, length);
if (!type_ret) {
ErrorManager::GetInstance().ATCReportErrMessage("E19025", {"situation", "reason"},
{"Input datatype[" + TypeUtils::DataTypeToSerialString(data_type) + "]", "it is not support"});
REPORT_INNER_ERROR("E19999", "const node:%s's input datatype:%s it is not support",
node->GetName().c_str(), TypeUtils::DataTypeToSerialString(data_type).c_str());
GELOGE(PARAM_INVALID, "[Check][Param] Input datatype %s is not support.",
TypeUtils::DataTypeToSerialString(data_type).c_str());
return FAILED;
@@ -1897,19 +1894,22 @@ Status GraphPrepare::VerifyConstOp(const NodePtr &node) {
if (ge_tensor_desc.GetShape().GetDims().size() == 0) {
// shape = [], means it's a scalar tensor.
GE_CHK_BOOL_EXEC(data_size / length == 1,
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {"Const is invalid scalar tensor."});
REPORT_INNER_ERROR("E19999", "Const Node:%s is invalid, data size:%zu not equal to tensor size:%u",
node->GetName().c_str(), data_size, length);
return PARAM_INVALID, "[Check][Param] Const is invalid scalar tensor.");
} else {
// shape = [x, y, 0,...], means it's a vector tensor that value is [].
GE_CHK_BOOL_EXEC(data_size == 0,
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {"Const is invalid vector scalar."});
REPORT_INNER_ERROR("E19999", "Const Node:%s is invalid, data size:%zu not equal to tensor size:0",
node->GetName().c_str(), data_size);
return PARAM_INVALID, "[Check][Param] Const is invalid vector scalar.");
}
} else {
GE_CHK_BOOL_EXEC(data_size == static_cast<size_t>(shape_size * length) && data_size != 0,
ErrorManager::GetInstance().ATCReportErrMessage(
"E10043", {"reason"}, {"Const input data size is not equal with tensor desc shape"});
return PARAM_INVALID, "[Check][Param] Const input data size is not equal with tensor desc shape");
GE_CHK_BOOL_EXEC(
data_size == static_cast<size_t>(shape_size * length) && data_size != 0,
REPORT_INNER_ERROR("E19999", "Const Node:%s is invalid, data size:%zu not equal to tensor size:%ld",
node->GetName().c_str(), data_size, shape_size * length);
return PARAM_INVALID, "[Check][Param] Const input data size is not equal with tensor desc shape");
}
return SUCCESS;
}
@@ -1952,9 +1952,9 @@ Status GraphPrepare::CheckUserInput(const std::vector<GeTensor> &user_input) {
return GE_GRAPH_INIT_FAILED;
}
if ((index < 0) || (static_cast<size_t>(index) >= user_input.size())) {
std::string situation = "data op index[" + std::to_string(index) + "]";
std::string reason = "it must less than user_input size[" + std::to_string(user_input.size()) + "]";
ErrorManager::GetInstance().ATCReportErrMessage("E19025", {"situation", "reason"}, {situation, reason});
std::string reason = "exist data op:" + input_node->GetName() + " index " + std::to_string(index) +
" bigger than input tensor size[" + std::to_string(user_input.size()) + "], check invalid";
REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
GELOGE(GE_GRAPH_INIT_FAILED, "[Check][Param] user_input size:%zu must larger than data op index:%ld.",
user_input.size(), index);
return GE_GRAPH_INIT_FAILED;
@@ -1967,10 +1967,10 @@ Status GraphPrepare::CheckUserInput(const std::vector<GeTensor> &user_input) {
for (size_t i = 0; i < desc.GetShape().GetDimNum(); ++i) {
int64_t dim = desc.GetShape().GetDim(i);
if (dim < UNKNOWN_DIM_NUM) {
std::string situation = "data dim[" + std::to_string(i) + "][" + std::to_string(dim) + "]" ;
std::string reason = "it need >= -2";
std::string reason = "data dim[" + std::to_string(i) + "][" + std::to_string(dim) + "] of index:" +
std::to_string(index) + " input tensor it need >= -2";
REPORT_INPUT_ERROR(
"E19025", std::vector<std::string>({"situation", "reason"}), std::vector<std::string>({situation, reason}));
"E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
GELOGE(GE_GRAPH_INIT_FAILED, "[Check][InputDim]data dim %zu is not supported, need >= -2, real:%ld.", i, dim);
return GE_GRAPH_INIT_FAILED;
}
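Within this file's changes, the VerifyConstOp hunk keeps the same three-way size check for Const weights while switching the error path to REPORT_INNER_ERROR. A standalone restatement of that check's intent in plain C++ (not GE code; the zero-length guard is an addition of this sketch):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// data_size: bytes carried by the Const weight; elem_len: bytes per element.
static bool ConstSizeValid(size_t data_size, uint32_t elem_len,
                           const std::vector<int64_t> &dims) {
  int64_t shape_size = 1;
  bool has_zero_dim = false;
  for (int64_t d : dims) {
    if (d == 0) has_zero_dim = true;
    shape_size *= d;
  }
  if (dims.empty()) {                     // shape = [] : scalar tensor
    return elem_len != 0 && data_size / elem_len == 1;
  }
  if (has_zero_dim) {                     // shape = [x, 0, ...] : empty tensor
    return data_size == 0;
  }
  return data_size == static_cast<size_t>(shape_size * elem_len) && data_size != 0;
}

int main() {
  std::printf("%d\n", ConstSizeValid(4, 4, {}));         // scalar float32: valid
  std::printf("%d\n", ConstSizeValid(0, 4, {2, 0, 3}));  // empty tensor: valid
  std::printf("%d\n", ConstSizeValid(20, 4, {2, 3}));    // 2x3 float32 needs 24 bytes: invalid
  return 0;
}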


+ 4
- 3
ge/graph/preprocess/insert_op/ge_aipp_op.cc

@@ -114,8 +114,9 @@ Status GetDataDimN(const ge::NodePtr &data_node, ge::Format format, int64_t &bat
std::vector<std::string>({
data_node->GetName() + " format",
TypeUtils::FormatToSerialString(format),
"only format " + TypeUtils::FormatToSerialString(FORMAT_NCHW) + " and "
+ TypeUtils::FormatToSerialString(FORMAT_NHWC) + " supported"}));
"only format " + TypeUtils::FormatToSerialString(FORMAT_NCHW) + " and "+
TypeUtils::FormatToSerialString(FORMAT_NHWC) +
" supported which dynamic aipp is linked"}));
GELOGE(PARAM_INVALID, "[Check][Param] Not support data format:%s, node:%s",
TypeUtils::FormatToSerialString(format).c_str(), data_node->GetName().c_str());
return PARAM_INVALID;
@@ -475,7 +476,7 @@ Status AippOp::ConvertRelatedInputNameToRank() {
string error_msg = "Top name " + related_input_name + "convert rank failed, Please"
" ensure top name in aipp config is the top name of data node.";
GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str());
REPORT_INPUT_ERROR("E19021", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
REPORT_INPUT_ERROR("E10052", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
return PARAM_INVALID;
}



+ 4
- 4
ge/graph/preprocess/insert_op/util_insert_aipp_op.cc

@@ -126,14 +126,14 @@ Status InsertNewOpUtil::CheckInputNamePositionNotRepeat() {
string error_msg = "Can not both set related_input_name and related_input_rank!"
" Please ensure param is the same with the first aipp config(related_input_name).";
GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str());
REPORT_INPUT_ERROR("E19021", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
REPORT_INPUT_ERROR("E10052", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
return PARAM_INVALID;
}
if (item->related_input_name() == another_item->related_input_name()) {
string error_msg = "Can not insert aipp to the same postion! Please ensure related_input_name"
" param is different in different aipp config.";
GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str());
REPORT_INPUT_ERROR("E19021", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
REPORT_INPUT_ERROR("E10052", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
return PARAM_INVALID;
}
}
@@ -154,14 +154,14 @@ Status InsertNewOpUtil::CheckInputRankPositionNoRepeat() {
string error_msg = "Can not both set related_input_rank and related_input_name!"
" Please ensure param is the same with the first aipp config(related_input_rank).";
GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str());
REPORT_INPUT_ERROR("E19021", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
REPORT_INPUT_ERROR("E10052", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
return PARAM_INVALID;
}
if (item->related_input_rank() == another_item->related_input_rank()) {
string error_msg = "Can not insert aipp to the same postion! Please ensure related_input_rank"
" param is different in different aipp config.";
GELOGE(PARAM_INVALID, "[Check][InputParam]%s", error_msg.c_str());
REPORT_INPUT_ERROR("E19021", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
REPORT_INPUT_ERROR("E10052", std::vector<std::string>({"reason"}), std::vector<std::string>({error_msg}));
return PARAM_INVALID;
}
}


+ 4
- 4
ge/graph/preprocess/multi_batch_copy_graph.cc

@@ -764,8 +764,8 @@ Status MultiBatchGraphCopyer::CheckAndParseDynamicData(){
data_name.c_str()); return PARAM_INVALID);
} else if (dynamic_type_ == DynamicType::kDynamicDims) {
ErrorManager::GetInstance().ATCReportErrMessage("E10001",
{"parameter", "reason"},
{"--input_shape",
{"parameter", "value" "reason"},
{"--dynamic_dims", data_name,
"all dynamic data must be set in --input_shape"});
GELOGE(INTERNAL_ERROR, "[Check][Param] data:%s shape:%s must be set int --input_shape",
node->GetName().c_str(), data_shape.ToString().c_str());
@@ -1205,8 +1205,8 @@ Status MultiBatchGraphCopyer::CheckCopyResult(const std::vector<NodePtr> &start_
}
auto dims = NodeUtils::GetOutputDesc(*node, kDataOutIndex).GetShape().GetDims();
if (!IsAllDimsPositive(dims)) {
ErrorManager::GetInstance().ATCReportErrMessage("E15004", {"opname", "shape"},
{node->GetName(), formats::ShapeToString(dims)});
REPORT_CALL_ERROR("E19999", "Failed to copy multi batch graph, the node %s still has unknown shape %s",
node->GetName().c_str(), formats::ShapeToString(dims).c_str());
GELOGE(INTERNAL_ERROR, "[Check][Param] Failed to copy multi batch graph, the node %s still has unknown shape %s",
node->GetName().c_str(), formats::ShapeToString(dims).c_str());
return INTERNAL_ERROR;


+ 4
- 7
ge/graph/preprocess/multi_batch_options.cc

@@ -550,10 +550,8 @@ Status CalcShape(const std::vector<int64_t> &batch_shape, GeShape &data_shape) {
for (size_t i = 0; i < data_shape.GetDimNum(); ++i) {
if (data_shape.GetDim(i) < 0) {
if (batch_shape_index >= batch_shape.size()) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E19012", {"function", "reason"},
{"CalcShape", "the batch shape count " + std::to_string(batch_shape.size()) +
" does not match the data shape " + data_shape.ToString()});
REPORT_INNER_ERROR("E19999", "the batch shape count %zu, does not match the data shape %s",
batch_shape.size(), data_shape.ToString().c_str());
GELOGE(PARAM_INVALID, "[Check][Param] Failed to calc tensor shape, the batch shape count %zu, "
"does not match the data shape %s", batch_shape.size(), data_shape.ToString().c_str());
return PARAM_INVALID;
@@ -563,9 +561,8 @@ Status CalcShape(const std::vector<int64_t> &batch_shape, GeShape &data_shape) {
}
GELOGI("CalcShape size of batch_shape is %zu, batch_shape_index is %zu.", batch_shape.size(), batch_shape_index);
if (batch_shape_index != batch_shape.size()) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E19012", {"function", "reason"}, {"CalcShape", "the batch shape count " + std::to_string(batch_shape.size()) +
" does not match the data shape " + data_shape.ToString()});
REPORT_INNER_ERROR("E19999", "the batch shape count %zu, does not match the data shape %s",
batch_shape.size(), data_shape.ToString().c_str());
GELOGE(PARAM_INVALID, "[Check][Param] Failed to calc tensor shape, the batch shape count %zu, "
"does not match the data shape %s", batch_shape.size(), data_shape.ToString().c_str());
return PARAM_INVALID;
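Both hunks in this file report the same condition: the number of dynamic (-1) dimensions in the data shape must equal the number of values supplied for one gear. A standalone illustration of that contract (plain C++, not the GE CalcShape function):

#include <cstdio>
#include <vector>

static bool FillDynamicDims(const std::vector<int64_t> &batch_shape,
                            std::vector<int64_t> &data_shape) {
  size_t batch_index = 0;
  for (auto &dim : data_shape) {
    if (dim < 0) {
      if (batch_index >= batch_shape.size()) {
        return false;                        // fewer gear values than -1 dims
      }
      dim = batch_shape[batch_index++];
    }
  }
  return batch_index == batch_shape.size();  // leftover gear values are also an error
}

int main() {
  std::vector<int64_t> shape = {-1, 3, -1, 224};
  const bool ok = FillDynamicDims({8, 224}, shape);  // shape becomes {8, 3, 224, 224}
  std::printf("ok=%d dim0=%ld\n", ok ? 1 : 0, static_cast<long>(shape[0]));
  return 0;
}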


+ 58
- 8
ge/hybrid/executor/node_state.cc

@@ -19,8 +19,9 @@
#include "framework/common/debug/log.h"
#include "graph/compute_graph.h"
#include "graph/utils/tensor_utils.h"
#include "hybrid_execution_context.h"
#include "subgraph_context.h"
#include "hybrid/executor/hybrid_execution_context.h"
#include "hybrid/executor/subgraph_context.h"
#include "hybrid/node_executor/task_context.h"

#define INC_ITERATION_COUNT(iteration) \
do { \
@@ -260,6 +261,16 @@ NodeState::NodeState(const NodeItem &node_item, SubgraphContext *subgraph_contex
this->op_desc_ = node_item.node->GetOpDesc();
}

Status NodeState::Init(int group, const shared_ptr<FrameState> &frame_state) {
GE_CHECK_NOTNULL(frame_state);
group_ = group;
frame_state_ = frame_state;
auto unique_task_context = TaskContext::Create(this, subgraph_context_);
GE_CHECK_NOTNULL(unique_task_context);
task_context_ = std::shared_ptr<TaskContext>(unique_task_context.release());
return SUCCESS;
}

Status NodeState::AwaitInputTensors(GraphExecutionContext &context) const {
if (node_item_->IsMergeOp()) {
GELOGD("[%s] merge index %d, input nodes: %zu", GetName().c_str(), merge_index_, node_item_->data_recv_.size());
@@ -314,15 +325,54 @@ std::shared_ptr<TaskContext> NodeState::GetTaskContext() {
return task_context_;
}

void NodeState::SavePersistTensor(int input_idx, const TensorValue &tensor) {
if (node_item_->root_data_.count(input_idx) > 0) {
GELOGD("[%s] Save Root input tensor: %d", GetName().c_str(), input_idx);
root_tensor_values_[input_idx] = tensor;
}

if (node_item_->enter_data_.count(input_idx) > 0) {
GELOGD("[%s] Save Enter input tensor: %d", GetName().c_str(), input_idx);
root_tensor_values_[input_idx] = tensor;
}
}

void NodeState::UpdatePersistTensor(int input_idx) {
const auto it = root_tensor_values_.find(input_idx);
if (it == root_tensor_values_.end()) {
GELOGW("[%s] Not found saved tensor: %d", GetName().c_str(), input_idx);
return;
}

auto tensor = task_context_->MutableInput(input_idx);
if (tensor == nullptr) {
GELOGW("[%s] Not found input tensor: %d", GetName().c_str(), input_idx);
return;
}

*tensor = it->second;
GELOGD("[%s] Update input tensor: %d", GetName().c_str(), input_idx);
}

void NodeState::ResetContext(uint64_t iteration) {
switch_index_ = -1;
subgraph_context_->ResetContext(node_item_->node);
if (iteration == 0) {
data_scheduled_ = static_cast<uint32_t>(node_item_->root_data_.size());
ctrl_scheduled_ = static_cast<uint32_t>(node_item_->root_ctrl_.size());
} else {
data_scheduled_ = static_cast<uint32_t>(node_item_->root_data_.size() + node_item_->enter_data_.size());
ctrl_scheduled_ = static_cast<uint32_t>(node_item_->root_ctrl_.size() + node_item_->enter_ctrl_.size());
auto unique_task_context = TaskContext::Create(this, subgraph_context_);
GE_CHECK_NOTNULL_JUST_RETURN(unique_task_context);
task_context_ = std::shared_ptr<TaskContext>(unique_task_context.release());

data_scheduled_ = static_cast<uint32_t>(node_item_->root_data_.size());
ctrl_scheduled_ = static_cast<uint32_t>(node_item_->root_ctrl_.size());
for (auto item : node_item_->root_data_) {
UpdatePersistTensor(item.first);
}

if (iteration > 0) {
data_scheduled_ += static_cast<uint32_t>(node_item_->enter_data_.size());
ctrl_scheduled_ += static_cast<uint32_t>(node_item_->enter_ctrl_.size());
for (auto item : node_item_->enter_data_) {
UpdatePersistTensor(item.first);
}
}

iteration_count_ = iteration;
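The SavePersistTensor/UpdatePersistTensor pair above caches inputs that come from root or Enter producers and writes them back into the freshly created TaskContext on every ResetContext. A simplified standalone sketch of that save-and-restore pattern (demo types only, not GE's TensorValue plumbing):

#include <cstdio>
#include <map>
#include <set>
#include <vector>

struct DemoTensor { int value; };

struct DemoNodeState {
  std::set<int> persist_inputs;        // input indices fed by root/Enter nodes
  std::map<int, DemoTensor> saved;     // analogue of root_tensor_values_
  std::vector<DemoTensor> ctx_inputs;  // inputs of the current iteration's context

  void SavePersistTensor(int idx, const DemoTensor &t) {
    if (persist_inputs.count(idx) > 0) saved[idx] = t;          // cache once, by index
  }
  void ResetForIteration() {           // analogue of ResetContext
    for (auto &kv : saved) ctx_inputs[kv.first] = kv.second;    // restore cached inputs
  }
};

int main() {
  DemoNodeState state;
  state.persist_inputs = {0};
  state.ctx_inputs.resize(2);
  state.SavePersistTensor(0, DemoTensor{42});  // producer propagated an output
  state.ResetForIteration();                   // next loop iteration begins
  std::printf("input0=%d\n", state.ctx_inputs[0].value);
  return 0;
}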


+ 6
- 8
ge/hybrid/executor/node_state.h

@@ -100,6 +100,8 @@ struct NodeState {
NodeState(const NodeItem &node_item, SubgraphContext *subgraph_context);
~NodeState() = default;

Status Init(int group, const shared_ptr<FrameState> &frame_state);

OpDesc *GetOpDesc() const {
return op_desc_.get();
}
@@ -129,6 +131,8 @@ struct NodeState {
void RunStreamActive();
void RunNextIteration();

void SavePersistTensor(int input_idx, const TensorValue &tensor);

Status NodeScheduled(const std::function<void(const NodeItem *)> &ready) const;

void SetScheduleFuture(std::future<Status> &&future);
@@ -150,18 +154,10 @@ struct NodeState {
return merge_index_;
}

void SetGroup(int group) {
group_ = group;
}

int GetGroup() const {
return group_;
}

void SetFrameState(const shared_ptr<FrameState> &frame_state) {
frame_state_ = frame_state;
}

const shared_ptr<NodeTask> &GetKernelTask() const {
return kernel_task_;
}
@@ -187,6 +183,7 @@ struct NodeState {
void SetCtrlSchedule(const NodeState &node_state, const std::function<void(const NodeItem *)> &ready);
void ResetContext(uint64_t iteration);
void ScheduleContext(const NodeState &node_state);
void UpdatePersistTensor(int input_idx);

const NodeItem *node_item_ = nullptr;
std::shared_ptr<NodeTask> kernel_task_ = nullptr;
@@ -199,6 +196,7 @@ struct NodeState {

std::future<Status> schedule_future_;
std::shared_ptr<FrameState> frame_state_;
std::map<int, TensorValue> root_tensor_values_;
uint64_t active_count_ = 0;
uint64_t iteration_count_ = 0;
uint32_t ctrl_scheduled_ = 0;


+ 19
- 8
ge/hybrid/executor/subgraph_context.cc

@@ -19,7 +19,7 @@

namespace ge {
namespace hybrid {
SubgraphContext::SubgraphContext(const GraphItem *graph_item, const GraphExecutionContext *execution_context)
SubgraphContext::SubgraphContext(const GraphItem *graph_item, GraphExecutionContext *execution_context)
: graph_item_(graph_item), execution_context_(execution_context) {
}

@@ -79,20 +79,31 @@ NodeStatePtr SubgraphContext::GetOrCreateNodeState(const NodeItem *node_item) {
return nullptr;
}

return CreateNodeState(node_item);
}

NodeStatePtr SubgraphContext::CreateNodeState(const NodeItem *node_item) {
GELOGD("[%s] lock for write", node_item->NodeName().c_str());
if (mmRWLockWRLock(&rw_lock_) != EN_OK) {
REPORT_CALL_ERROR("E19999", "[Node:%s] Lock for write failed", node_item->NodeName().c_str());
GELOGE(INTERNAL_ERROR, "[RWLock][Lock][Node:%s] Lock for write failed", node_item->NodeName().c_str());
return nullptr;
}

auto &node_state = node_states_[node_item];
if (node_state == nullptr) {
const auto &guard = node_item->MutexGuard("GetOrCreateNodeState");
node_state.reset(new(std::nothrow)NodeState(*node_item, this));
node_state->SetFrameState(GetOrCreateFrameState(*node_item));
node_state->SetGroup(group_);
(void)guard;
}
do {
if (node_state == nullptr) {
const auto &guard = node_item->MutexGuard("GetOrCreateNodeState");
node_state.reset(new(std::nothrow)NodeState(*node_item, this));
if (node_state == nullptr || node_state->Init(group_, GetOrCreateFrameState(*node_item)) != SUCCESS) {
GELOGE(INTERNAL_ERROR, "[Create][NodeState] failed for[%s].", node_item->NodeName().c_str());
REPORT_CALL_ERROR("E19999", "Create NodeState failed for %s.", node_item->NodeName().c_str());
break;
}
(void)guard;
}
} while (0);

GELOGD("[%s] unlock for write", node_item->NodeName().c_str());
if (mmWRLockUnLock(&rw_lock_) != EN_OK) {
REPORT_CALL_ERROR("E19999", "[Node:%s] Unlock for write failed", node_item->NodeName().c_str());


+ 3
- 2
ge/hybrid/executor/subgraph_context.h

@@ -30,7 +30,7 @@ namespace ge {
namespace hybrid {
class SubgraphContext {
public:
explicit SubgraphContext(const GraphItem *graph_item, const GraphExecutionContext *execution_context);
explicit SubgraphContext(const GraphItem *graph_item, GraphExecutionContext *execution_context);
~SubgraphContext();

Status Init();
@@ -51,10 +51,11 @@ class SubgraphContext {
void NodeDone(const NodePtr &node);

private:
NodeStatePtr CreateNodeState(const NodeItem *node_item);
FrameStatePtr GetOrCreateFrameState(const NodeItem &node_item); // no lock
friend class TaskContext;
const GraphItem *graph_item_;
const GraphExecutionContext *execution_context_;
GraphExecutionContext *execution_context_;
mmRWLock_t rw_lock_;
std::vector<TensorValue> all_inputs_;
std::vector<TensorValue> all_outputs_;


+ 3
- 15
ge/hybrid/executor/subgraph_executor.cc

@@ -175,16 +175,12 @@ Status SubgraphExecutor::ExecuteAsyncForKnownShape(const std::vector<TensorValue
GE_CHECK_NOTNULL(node_state);
node_state->SetKernelTask(node_item->kernel_task);

known_shape_task_context_ = TaskContext::Create(node_state.get(), context_, subgraph_context_.get());
GE_CHECK_NOTNULL(known_shape_task_context_);
node_state->SetTaskContext(known_shape_task_context_);

std::function<void()> callback;
GE_CHK_STATUS_RET_NOLOG(InitCallback(node_state.get(), callback));
HYBRID_CHK_STATUS_RET(ExecutionEngine::ExecuteAsync(*node_state, known_shape_task_context_, *context_, callback),
HYBRID_CHK_STATUS_RET(ExecutionEngine::ExecuteAsync(*node_state, node_state->GetTaskContext(), *context_, callback),
"[%s] Failed to execute node [%s] for known subgraph.",
graph_item_->GetName().c_str(),
known_shape_task_context_->GetNodeName());
node_state->GetName().c_str());

GELOGD("[%s] Done execute non-dynamic subgraph successfully.", graph_item_->GetName().c_str());
return SUCCESS;
@@ -271,16 +267,12 @@ Status SubgraphExecutor::PrepareNode(const NodeItem &node_item, int group) {
} else {
node_state->SetKernelTask(node_item.kernel_task);
}
auto unique_task_context = TaskContext::Create(node_state.get(), context_, subgraph_context_.get());
GE_CHECK_NOTNULL(unique_task_context);
const auto &task = node_state->GetKernelTask();
if (task == nullptr) {
GELOGE(INTERNAL_ERROR, "[Get][KernelTask] failed for[%s], NodeTask is null.", node_state->GetName().c_str());
REPORT_CALL_ERROR("E19999", "GetKernelTask failed for %s, nodetask is null.", node_state->GetName().c_str());
return INTERNAL_ERROR;
}
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);
GE_CHK_STATUS_RET_NOLOG(NodeEnqueue(p_node_state));
return AfterPrepared(p_node_state);
}
@@ -480,19 +472,15 @@ Status SubgraphExecutor::PrepareForExecution(GraphExecutionContext *ctx, NodeSta
} else {
node_state.SetKernelTask(node_item.kernel_task);
}
auto unique_task_context = TaskContext::Create(&node_state, context_, subgraph_context_.get());
GE_CHECK_NOTNULL(unique_task_context);
const auto &task = node_state.GetKernelTask();
if (task == nullptr) {
GELOGE(INTERNAL_ERROR, "[Invoke][GetKernelTask] failed for[%s], NodeTask is null.", node_state.GetName().c_str());
REPORT_CALL_ERROR("E19999", "invoke GetKernelTask failed for %s, NodeTask is null.", node_state.GetName().c_str());
return INTERNAL_ERROR;
}
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state.SetTaskContext(shared_task_context);
GE_CHK_RT_RET(rtCtxSetCurrent(ctx->rt_context));
RECORD_COMPILE_EVENT(ctx, node_item.NodeName().c_str(), "[UpdateTilingData] start");
GE_CHK_STATUS_RET_NOLOG(task->UpdateTilingData(*shared_task_context)); // update op_desc before alloc ws
GE_CHK_STATUS_RET_NOLOG(task->UpdateTilingData(*node_state.GetTaskContext())); // update op_desc before alloc ws
RECORD_COMPILE_EVENT(ctx, node_item.NodeName().c_str(), "[UpdateTilingData] end");
return SUCCESS;
}


+ 0
- 1
ge/hybrid/executor/subgraph_executor.h

@@ -125,7 +125,6 @@ class SubgraphExecutor {
ThreadPool pre_run_pool_;
BlockingQueue<NodeState *> ready_queue_;
std::unique_ptr<ShapeInferenceEngine> shape_inference_engine_;
std::shared_ptr<TaskContext> known_shape_task_context_;

std::mutex mu_; // Guard for prepare_queues_.
std::map<int, BlockingQueue<const NodeItem *>> prepare_queues_;


+ 2
- 3
ge/hybrid/model/node_item.cc

@@ -398,12 +398,11 @@ void NodeItem::SetDataSend(NodeItem *node_item, int anchor_index) {
data_send_.emplace(node_item);
node_item->data_recv_[this] = anchor_index;
if (is_root_node_) {
node_item->root_data_.emplace(this);
node_item->root_data_[anchor_index] = this;
}
// If Enter feed Not Merge, take as root Node.
if (IsEnterOp() && (node_item->node_type != STREAMMERGE)) {
node_item->enter_data_.emplace(this);
node_item->enter_inside_.emplace(anchor_index);
node_item->enter_data_[anchor_index] = this;
}
GELOGI("Node[%s] will control node[%s]", NodeName().c_str(), node_item->NodeName().c_str());
}


+ 2
- 3
ge/hybrid/model/node_item.h

@@ -148,15 +148,14 @@ struct NodeItem {
int64_t frame_index_ = -1;
int64_t parent_frame_ = -1;
std::set<const NodeItem *> root_ctrl_; // Recv ctrl from root node
std::set<const NodeItem *> root_data_; // Recv data from root node
std::map<int, const NodeItem *> root_data_; // Recv data from root node
std::set<const NodeItem *> enter_ctrl_; // Recv ctrl from Enter node
std::set<const NodeItem *> enter_data_; // Recv data from Enter node
std::map<int, const NodeItem *> enter_data_; // Recv data from Enter node
std::set<const NodeItem *> data_send_; // Send data notify to
std::map<const NodeItem *, int> data_recv_; // Recv data notify from
std::set<const NodeItem *> ctrl_send_; // Send ctrl notify to
std::set<const NodeItem *> ctrl_recv_; // Recv ctrl notify from
std::vector<std::vector<const NodeItem *>> switch_groups_; // Send ctrl notify to
std::set<int> enter_inside_; // Enter feed loop inside Node, Not cross Merge.

std::shared_ptr<NodeTask> kernel_task;
std::unique_ptr<FusedSubgraph> fused_subgraph;


+ 7
- 10
ge/hybrid/node_executor/task_context.cc

@@ -52,9 +52,7 @@ void TaskContext::ReleaseWorkspace() {
}
}

std::unique_ptr<TaskContext> TaskContext::Create(NodeState *node_state,
GraphExecutionContext *execution_context,
SubgraphContext *subgraph_context) {
std::unique_ptr<TaskContext> TaskContext::Create(NodeState *node_state, SubgraphContext *subgraph_context) {
const NodeItem &node_item = *node_state->GetNodeItem();
GELOGI("[%s] To create task context, input start = %d, num_inputs = %d, output start = %d, num_outputs = %d.",
node_item.NodeName().c_str(),
@@ -75,7 +73,7 @@ std::unique_ptr<TaskContext> TaskContext::Create(NodeState *node_state,
}

auto task_context = std::unique_ptr<TaskContext>(
new(std::nothrow)TaskContext(execution_context, node_state, subgraph_context));
new(std::nothrow)TaskContext(subgraph_context->execution_context_, node_state, subgraph_context));
if (task_context == nullptr) {
REPORT_CALL_ERROR("E19999", "Create TaskContext failed for [%s].", node_item.NodeName().c_str());
GELOGE(MEMALLOC_FAILED, "[Create][TaskContext] failed for [%s].", node_item.NodeName().c_str());
@@ -85,7 +83,7 @@ std::unique_ptr<TaskContext> TaskContext::Create(NodeState *node_state,
task_context->node_item_ = &node_item;
task_context->inputs_start_ = subgraph_context->all_inputs_.data() + node_item.input_start;
task_context->outputs_start_ = subgraph_context->all_outputs_.data() + node_item.output_start;
task_context->iteration_ = execution_context->iteration;
task_context->iteration_ = subgraph_context->execution_context_->iteration;
return task_context;
}

@@ -460,6 +458,10 @@ Status TaskContext::PropagateOutputs() {
subgraph_context_->all_inputs_[input_offset].SetName(
node_item_->NodeName() + "_in_" + std::to_string(dst_input_idx));
}

auto dst_node_state = subgraph_context_->GetOrCreateNodeState(dst_node_item);
GE_CHECK_NOTNULL(dst_node_state);
dst_node_state->SavePersistTensor(dst_input_idx, *tensor);
}
}
(void)guard;
@@ -489,11 +491,6 @@ void TaskContext::ReleaseInputsAndOutputs() {
}

void TaskContext::ReleaseInput(int index) {
if (node_item_->enter_inside_.count(index) > 0) {
GELOGD("[%s] Tensor of input[%d] is enter, keep it", GetNodeName(), index);
return;
}

auto input_tensor = MutableInput(index);
if (input_tensor != nullptr) {
input_tensor->Destroy();


+ 1
- 3
ge/hybrid/node_executor/task_context.h

@@ -36,9 +36,7 @@ class SubgraphContext;

class TaskContext {
public:
static std::unique_ptr<TaskContext> Create(NodeState *node_state,
GraphExecutionContext *execution_context,
SubgraphContext *subgraph_context);
static std::unique_ptr<TaskContext> Create(NodeState *node_state, SubgraphContext *subgraph_context);

~TaskContext();



+ 5
- 4
ge/ir_build/attr_options/keep_dtype_option.cc

@@ -27,7 +27,7 @@ namespace {
const size_t kMaxOpsNum = 10;
} // namespace
void KeepDtypeReportError(const std::vector<std::string> &invalid_list) {
void KeepDtypeReportError(const std::vector<std::string> &invalid_list, const std::string &cfg_path) {
std::stringstream err_msg;
size_t list_size = invalid_list.size();
err_msg << "config file contains " << list_size;
@@ -48,8 +48,9 @@ void KeepDtypeReportError(const std::vector<std::string> &invalid_list) {
}
}
ErrorManager::GetInstance().ATCReportErrMessage(
"E10042", {"parameter", "reason"}, {"keep_dtype", err_msg.str().c_str()});
REPORT_INPUT_ERROR(
"E10003", std::vector<std::string>({"parameter", "value", "reason"}),
std::vector<std::string>({"keep_dtype", cfg_path, err_msg.str()}));
GELOGE(FAILED, "%s", err_msg.str().c_str());
}
@@ -95,7 +96,7 @@ graphStatus KeepDtypeFunc(ComputeGraphPtr &graph, const std::string &cfg_path) {
ifs.close();
if (!invalid_list.empty()) {
KeepDtypeReportError(invalid_list);
KeepDtypeReportError(invalid_list, cfg_path);
return GRAPH_PARAM_INVALID;
}


+ 1
- 2
ge/ir_build/ge_ir_build.cc

@@ -890,8 +890,7 @@ static std::string AttrTypeToSerialString(aclgrphAttrType attr_type) {
if (it != kAttrTypeToStringMap.end()) {
return it->second;
} else {
ErrorManager::GetInstance().ATCReportErrMessage("E19012", {"function", "reason"},
{"AttrTypeToSerialString", "attr_type[" + std::to_string(attr_type) + "] is not support"});
REPORT_INNER_ERROR("E19999", "attr_type:%u is not support", attr_type);
GELOGE(GRAPH_FAILED, "[Check][AclgrphAttrType] attr_type not support %u", attr_type);
return "UNDEFINED";
}


+ 22
- 20
ge/ir_build/option_utils.cc

@@ -186,7 +186,8 @@ bool CheckDynamicBatchSizeInputShapeValid(map<string, vector<int64_t>> shape_map
for (char c : dynamic_batch_size) {
if (!isdigit(c) && (c != ',') && (c != ' ')) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E10033", {"value", "reason"}, {dynamic_batch_size, kDynamicBatchSizeError});
"E10003", {"parameter", "value", "reason"},
{"dynamic_batch_size", dynamic_batch_size, kDynamicBatchSizeError});
GELOGE(ge::PARAM_INVALID, "[Check][DynamicBatchSizeInputShape] --dynamic_batch_size:%s is invalid. reason: %s",
dynamic_batch_size.c_str(), kDynamicBatchSizeError);
return false;
@@ -203,7 +204,8 @@ bool CheckDynamicImagesizeInputShapeValid(map<string, vector<int64_t>> shape_map
if (!input_format.empty() && !ge::TypeUtils::IsFormatValid(input_format.c_str())) {
GELOGE(ge::PARAM_INVALID,
"[Check][DynamicImagesizeInputShape] input_format [%s] invalid, can not support now.", input_format.c_str());
REPORT_INPUT_ERROR("E10414", std::vector<std::string>({"input_format"}), std::vector<std::string>({input_format}));
REPORT_INPUT_ERROR("E10003", std::vector<std::string>({"parameter","value","reason"}),
std::vector<std::string>({"input_format", input_format, "this format is not support"}));
return false;
}
int32_t size = 0;
@@ -242,8 +244,8 @@ bool CheckDynamicImagesizeInputShapeValid(map<string, vector<int64_t>> shape_map
bool is_char_valid = isdigit(c) || (c == ',') || (c == ' ') || (c == ';');
if (!is_char_valid) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E10001", {"parameter", "value", "reason"},
{"dynamic_image_size", dynamic_image_size.c_str(), kDynamicImageSizeError});
"E10003", {"parameter", "value", "reason"},
{"dynamic_image_size", dynamic_image_size, kDynamicImageSizeError});
GELOGE(ge::PARAM_INVALID, "[Check][DynamicImageSizeInputShape] --dynamic_image_size:%s is invalid. reason: %s",
dynamic_image_size.c_str(), kDynamicImageSizeError);
return false;
@@ -256,7 +258,8 @@ bool CheckDynamicImagesizeInputShapeValid(map<string, vector<int64_t>> shape_map
for (auto str : split_set) {
split_dim = StringUtils::Split(str, ',');
if (split_dim.size() != static_cast<size_t>(kDynamicImageSizeNum)) {
ErrorManager::GetInstance().ATCReportErrMessage("E10020");
REPORT_INPUT_ERROR("E10020", std::vector<std::string>({"dynamic_image_size"}),
std::vector<std::string>({dynamic_image_size}));
GELOGE(ge::PARAM_INVALID,
"[Check][DynamicImagesizeInputShape] invalid value:%s number of dimensions of each group must be %ld.",
dynamic_image_size.c_str(), kDynamicImageSizeNum);
@@ -320,8 +323,9 @@ bool CheckAndParseDynamicDims(int32_t dynamic_dim_num, std::string &dynamic_dims
// Different parameter sets are split by ';'
vector<string> split_set = StringUtils::Split(dynamic_dims, ';');
if (split_set.size() > kMaxDynamicDimNum) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E10042", {"parameter", "reason"}, {"dynamic_dims", "dynamic_dims's num of parameter set can not exceed 100"});
REPORT_INPUT_ERROR(
"E10036", std::vector<std::string>({"shapesize", "maxshapesize"}),
std::vector<std::string>({std::to_string(split_set.size()), std::to_string(kMaxDynamicDimNum + 1)}));
GELOGE(ge::PARAM_INVALID,
"[CheckAndParse][DynamicDims]dynamic_dims's num of parameter set can not exceed %zu.", kMaxDynamicDimNum);
return false;
@@ -329,9 +333,10 @@ bool CheckAndParseDynamicDims(int32_t dynamic_dim_num, std::string &dynamic_dims
for (auto split_dim : split_set) {
vector<string> one_set = StringUtils::Split(split_dim, ',');
if (one_set.size() != static_cast<size_t>(dynamic_dim_num)) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E10042", {"parameter", "reason"},
{"dynamic_dims", "Each gear setting needs to be consistent with the number of -1 in the inputshape"});
REPORT_INPUT_ERROR(
"E10003", std::vector<std::string>({"parameter", "value", "reason"}),
std::vector<std::string>({"dynamic_dims", dynamic_dims,
"Each gear setting needs to be consistent with the number of -1 in the inputshape"}));
GELOGE(ge::PARAM_INVALID, "[CheckAndParse][DynamicDims] --dynamic_dims:%s invalid. "
"reason: Each gear setting needs to be consistent with the number of -1 in the inputshape.",
dynamic_dims.c_str());
@@ -496,8 +501,7 @@ Status CheckDynamicInputParamValid(string &dynamic_batch_size, string &dynamic_i
int32_t param_size = static_cast<int32_t>(!dynamic_batch_size.empty()) +
static_cast<int32_t>(!dynamic_image_size.empty()) + static_cast<int32_t>(!dynamic_dims.empty());
if (param_size > 1) {
ErrorManager::GetInstance().ATCReportErrMessage("E10009", {"parameter0", "parameter1", "parameter2"},
{"dynamic_batch_size", "dynamic_image_size", "dynamic_dims"});
REPORT_INPUT_ERROR("E10009", std::vector<std::string>(), std::vector<std::string>());
GELOGE(ge::PARAM_INVALID,
"[Parse][Parameter]dynamic_batch_size, dynamic_image_size and dynamic_dims can only be set one");
return ge::PARAM_INVALID;
@@ -608,17 +612,17 @@ bool ParseInputShape(const string &input_shape, map<string, vector<int64_t>> &sh
} catch (const std::out_of_range &) {
ErrorManager::GetInstance().ATCReportErrMessage("E10013", {"parameter", "value"},
{"--input_shape", shape_value_str});
GELOGW("Input parameter[--input_shape]’s value[%s] cause out of range execption!", shape_value_str.c_str());
GELOGW("Input parameter[--input_shape]'s value[%s] cause out of range execption!", shape_value_str.c_str());
return false;
} catch (const std::invalid_argument &) {
ErrorManager::GetInstance().ATCReportErrMessage("E10014", {"parameter", "value"},
{"--input_shape", shape_value_str});
GELOGW("Input parameter[--input_shape]’s value[%s] cause invalid argument!", shape_value_str.c_str());
GELOGW("Input parameter[--input_shape]'s value[%s] cause invalid argument!", shape_value_str.c_str());
return false;
} catch (...) {
ErrorManager::GetInstance().ATCReportErrMessage("E10015", {"parameter", "value"},
{"--input_shape", shape_value_str});
GELOGW("Input parameter[--input_shape]’s value[%s] cause unkown execption!", shape_value_str.c_str());
GELOGW("Input parameter[--input_shape]'s value[%s] cause unkown execption!", shape_value_str.c_str());
return false;
}
int64_t result = left_result;
@@ -627,7 +631,7 @@ bool ParseInputShape(const string &input_shape, map<string, vector<int64_t>> &sh
ErrorManager::GetInstance().ATCReportErrMessage("E10011", {"shape", "result"},
{shape, std::to_string(result)});
GELOGW(
"Input parameter[--input_shape]’s shape value[%s] is invalid, "
"Input parameter[--input_shape]'s shape value[%s] is invalid, "
"expect positive integer, but value is %ld.",
shape.c_str(), result);
return false;
@@ -725,13 +729,10 @@ int CheckLogParamValidAndSetLogLevel(const std::string log) {
} else {
GELOGE(ge::PARAM_INVALID,
"[Check][LogParam]log:%s invalid, only support debug, info, warning, error, null", log.c_str());
REPORT_INPUT_ERROR("E10417", std::vector<std::string>({"loglevel"}), std::vector<std::string>({log}));
return ret;
}
if (ret != 0) {
GELOGE(ge::PARAM_INVALID, "[Set][LogLevel] fail, level:%s.", log.c_str());
REPORT_INPUT_ERROR("E10417", std::vector<std::string>({"loglevel"}), std::vector<std::string>({log}));

}
return ret;
}
@@ -747,7 +748,8 @@ Status CheckInsertOpConfParamValid(const std::string insert_op_conf) {

Status CheckDisableReuseMemoryParamValid(const std::string disable_reuse_memory) {
if ((disable_reuse_memory != "") && (disable_reuse_memory != "0") && (disable_reuse_memory != "1")) {
ErrorManager::GetInstance().ATCReportErrMessage("E10006", {"parameter"}, {"disable_reuse_memory"});
REPORT_INPUT_ERROR("E10006", std::vector<std::string>({"parameter", "value"}),
std::vector<std::string>({"disable_reuse_memory", disable_reuse_memory}));
GELOGE(ge::PARAM_INVALID, "[Check][DisableReuseMemory]disable_reuse_memory must be 1 or 0.");
return ge::PARAM_INVALID;
}
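The --dynamic_image_size hunks above tighten the reported error codes but keep the two-stage validation: only digits, ',', ' ' and ';' are accepted, and every ';'-separated gear must contain exactly kDynamicImageSizeNum dimensions. A standalone illustration, assuming kDynamicImageSizeNum is 2 (H and W), which this diff does not show:

#include <algorithm>
#include <cctype>
#include <cstdio>
#include <sstream>
#include <string>

static bool ValidDynamicImageSize(const std::string &value) {
  for (char c : value) {
    if (!std::isdigit(static_cast<unsigned char>(c)) && c != ',' && c != ' ' && c != ';') {
      return false;                                    // invalid character (E10003-style)
    }
  }
  std::stringstream ss(value);
  std::string gear;
  while (std::getline(ss, gear, ';')) {
    const auto dims = std::count(gear.begin(), gear.end(), ',') + 1;
    if (dims != 2) {                                   // assumed kDynamicImageSizeNum == 2
      return false;                                    // wrong gear size (E10020-style)
    }
  }
  return true;
}

int main() {
  std::printf("%d\n", ValidDynamicImageSize("224,224;448,448") ? 1 : 0);  // 1
  std::printf("%d\n", ValidDynamicImageSize("224,224,3") ? 1 : 0);        // 0
  return 0;
}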


+ 7
- 40
ge/offline/main.cc

@@ -67,7 +67,7 @@ static bool is_dynamic_input = false;
const char *const kModeSupport = "only support 0(model to framework model), "
"1(framework model to json), 3(only pre-check), "
"5(pbtxt to json), 6(display model info)";
const char *const kModelToJsonSupport = "only support 0(Caffe) 3(TensorFlow) 5(Onnx)";
const char *const kModelToJsonSupport = "only support 0(Caffe) 3(TensorFlow) 5(Onnx) when model set 1";
const char *const kCaffeFormatSupport = "only support NCHW, ND in Caffe model";
const char *const kTFFormatSupport = "only support NCHW, NHWC, ND, NCDHW, NDHWC in TF model";
const char *const kONNXFormatSupport = "only support NCHW, ND, NCDHW in ONNX model";
@@ -102,14 +102,6 @@ DEFINE_int32(mode, 0,
"Optional; run mode, 0(default): model => framework model; 1: "
"framework model => json; 3: only pre-check; 5: txt => json.");

#if !defined(__ANDROID__) && !defined(ANDROID)
DEFINE_int32(encrypt_mode, -1, "Optional; the encrypt flag. 0: encrypt; -1(default): not encrypt");
DEFINE_string(encrypt_key, "", "Optional; the encrypt_key file.");
DEFINE_string(certificate, "", "Optional; the certificate file.");
DEFINE_string(hardware_key, "", "Optional; the ISV key file.");
DEFINE_string(private_key, "", "Optional; the private key file.");
#endif

DEFINE_string(out_nodes, "",
"Optional; output nodes designated by users."
"Format: \"node_name1:0;node_name1:1;node_name2:0\"");
@@ -405,29 +397,6 @@ class GFlagUtils {
"dynamic dims function does not support aipp"});
ret = ge::FAILED, "[Check][Param]dynamic dims function does not support aipp");

#if !defined(__ANDROID__) && !defined(ANDROID)
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(!CheckEncryptModeValid(FLAGS_encrypt_mode), ret = ge::FAILED,
"[Check][EncryptMode]value %d not valid!!", FLAGS_encrypt_mode);

if (FLAGS_encrypt_mode == 0) { // Encryption mode
GELOGI("ge will run with encrypt!");

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(!ge::CheckInputPathValid(FLAGS_encrypt_key), ret = ge::FAILED,
"[Check][InputPath]encrypt_key file not found!!");

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(!ge::CheckInputPathValid(FLAGS_certificate), ret = ge::FAILED,
"[Check][InputPath]certificate file not found!!");

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(!ge::CheckInputPathValid(FLAGS_hardware_key), ret = ge::FAILED,
"[Check][InputPath]hardware_key file not found!!");

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(!ge::CheckInputPathValid(FLAGS_private_key), ret = ge::FAILED,
"[Check][InputPath]private_key file not found!!");
} else { // No encryption
GELOGI("ge will run without encrypt!");
}
#endif

/**
* Check the validity of the I / O file path
*/
@@ -486,7 +455,8 @@ class GFlagUtils {
ret = ge::FAILED, "[Check][EnableSingleStream]failed!");

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((FLAGS_display_model_info != "0") && (FLAGS_display_model_info != "1"),
ErrorManager::GetInstance().ATCReportErrMessage("E10006", {"parameter"}, {"display_model_info"});
REPORT_INPUT_ERROR("E10006", std::vector<std::string>({"parameter", "value"}),
std::vector<std::string>({"display_model_info", FLAGS_display_model_info}));
ret = ge::FAILED, "[Check][Parameter]Input parameter[--display_model_info]'s value must be 1 or 0.");

return ret;
@@ -917,7 +887,8 @@ static Status ConvertModelToJson(int fwk_type, const string &model_file, const s
}

if (FLAGS_dump_mode != "0" && FLAGS_dump_mode != "1") {
ErrorManager::GetInstance().ATCReportErrMessage("E10006", {"parameter"}, {"dump_mode"});
REPORT_INPUT_ERROR("E10006", std::vector<std::string>({"parameter", "value"}),
std::vector<std::string>({"dump_mode", FLAGS_dump_mode}));
GELOGE(ge::FAILED, "[Convert][ModelToJson] Input parameter[--dump_mode]'s value must be 1 or 0.");
ret = ge::FAILED;
}
@@ -982,7 +953,8 @@ domi::Status GenerateModel(std::map<string, string> &options, std::string output
ge::Model load_model = ge::Model("loadmodel", "version2");
auto ret1 = load_model.LoadFromFile(FLAGS_model);
if (ret1 != ge::GRAPH_SUCCESS) {
ErrorManager::GetInstance().ATCReportErrMessage("E10041", {"parameter"}, {FLAGS_model});
REPORT_INPUT_ERROR("E10041", std::vector<std::string>({"file"}), std::vector<std::string>({FLAGS_model}));
REPORT_CALL_ERROR("E19999", "load from model file:%s failed", FLAGS_model.c_str());
DOMI_LOGE("Load model from %s failed, please check model file or "
"input parameter[--framework] is correct", FLAGS_model.c_str());
(void)ge_generator.Finalize();
@@ -1186,11 +1158,6 @@ domi::Status GenerateOmModel() {
options.insert(std::pair<string, string>(string(ge::FRAMEWORK_TYPE), to_string(FLAGS_framework)));
options.insert(std::pair<string, string>(string(ge::STREAM_NUM), to_string(f_stream_num)));
options.insert(std::pair<string, string>(string(ge::CALIBRATION_CONF_FILE), FLAGS_cal_conf));
options.insert(std::pair<string, string>(string(ge::ENCRYPT_MODE), to_string(FLAGS_encrypt_mode)));
options.insert(std::pair<string, string>(string(ge::EK_FILE), FLAGS_encrypt_key));
options.insert(std::pair<string, string>(string(ge::CERT_FILE), FLAGS_certificate));
options.insert(std::pair<string, string>(string(ge::HW_KEY_FILE), FLAGS_hardware_key));
options.insert(std::pair<string, string>(string(ge::PRIVATE_KEY_FILE), FLAGS_private_key));
options.insert(std::pair<string, string>(string(ge::OUTPUT_NODE_NAME), FLAGS_out_nodes));
options.insert(std::pair<string, string>(string(ge::INSERT_OP_FILE), FLAGS_insert_op_conf));
options.insert(std::pair<string, string>(string(ge::PRECISION_MODE), FLAGS_precision_mode));


+ 21
- 21
ge/offline/single_op_parser.cc

@@ -329,7 +329,7 @@ Status SingleOpParser::ReadJsonFile(const std::string &file, Json &json_obj) {
ifs >> json_obj;
} catch (const std::exception &e) {
ErrorManager::GetInstance().ATCReportErrMessage("E10025", {"realpath", "errmsg"}, {real_path, e.what()});
GELOGE(PARAM_INVALID,
GELOGE(PARAM_INVALID,
"[Parse][JsonFile] fail for file[%s] provided in input parameter[--singleop], exception = %s.",
real_path.c_str(), e.what());
return PARAM_INVALID;
@@ -349,16 +349,16 @@ bool SingleOpParser::Validate(const SingleOpDesc &op_desc) {
int index = 0;
for (auto &tensor_desc : op_desc.input_desc) {
if (!tensor_desc.GetValidFlag()) {
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "type", "index"},
{"intput", "datatype or format", std::to_string(index)});
GELOGE(PARAM_INVALID,
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"op_name", "input", "type", "index"},
{op_desc.op, "input", "tensor", std::to_string(index)});
GELOGE(PARAM_INVALID,
"[Check][Param] fail for Input's dataType or format is invalid when the index is %d", index);
return false;
}
if ((tensor_desc.type == DT_UNDEFINED && tensor_desc.format != FORMAT_RESERVED) ||
(tensor_desc.type != DT_UNDEFINED && tensor_desc.format == FORMAT_RESERVED)){
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "type", "index"},
{"intput", "datatype or format", std::to_string(index)});
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"op_name", "input", "type", "index"},
{op_desc.op, "input", "datatype or format", std::to_string(index)});
GELOGE(PARAM_INVALID, "[Check][Param]Input's dataType or format is invalid when the index is %d", index);
return false;
}
@@ -368,21 +368,21 @@ bool SingleOpParser::Validate(const SingleOpDesc &op_desc) {
index = 0;
for (auto &tensor_desc : op_desc.output_desc) {
if (!tensor_desc.GetValidFlag()) {
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "type", "index"},
{"output", "datatype", std::to_string(index)});
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"op_name", "input", "type", "index"},
{op_desc.op, "output", "tensor", std::to_string(index)});
GELOGE(PARAM_INVALID, "[Check][Param]fail for Output's dataType is invalid when the index is %d", index);
return false;
}
if (tensor_desc.type == DT_UNDEFINED) {
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "type", "index"},
{"output", "datatype", std::to_string(index)});
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"op_name", "input", "type", "index"},
{op_desc.op, "output", "datatype", std::to_string(index)});
GELOGE(PARAM_INVALID, "[Check][Param]Output's dataType is invalid when the index is %d", index);
return false;
}

if (tensor_desc.format == FORMAT_RESERVED) {
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "type", "index"},
{"output", "format", std::to_string(index)});
ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"op_name", "input", "type", "index"},
{op_desc.op, "output", "format", std::to_string(index)});
GELOGE(PARAM_INVALID, "[Check][Param]Output's format is invalid when the index is %d", index);
return false;
}
@@ -391,13 +391,13 @@ bool SingleOpParser::Validate(const SingleOpDesc &op_desc) {

for (auto &attr : op_desc.attrs) {
if (attr.name.empty()) {
ErrorManager::GetInstance().ATCReportErrMessage("E10029");
ErrorManager::GetInstance().ATCReportErrMessage("E10029", {"op_name"}, {op_desc.op});
GELOGE(PARAM_INVALID, "[Parse][Attr]attr name is empty");
return false;
}

if (attr.value.IsEmpty()) {
ErrorManager::GetInstance().ATCReportErrMessage("E10030", {"attrname"}, {attr.name});
ErrorManager::GetInstance().ATCReportErrMessage("E10030", {"op_name", "attrname"}, {op_desc.op, attr.name});
GELOGE(PARAM_INVALID, "[Parse][Attr] fail for vale of attr name:\"%s\" is empty. ", attr.name.c_str());
return false;
}
@@ -498,7 +498,7 @@ Status SingleOpParser::VerifyOpInputOutputSizeByIr(const OpDesc &current_op_desc
string reason = "is smaller than the ir needed input size " + std::to_string(ir_opdesc_inputs_num);
ErrorManager::GetInstance().ATCReportErrMessage("E19014", {"opname", "value", "reason"},
{current_op_desc.GetName(), "input size " + std::to_string(current_opdesc_inputs_num), reason});
GELOGE(PARAM_INVALID,
GELOGE(PARAM_INVALID,
"[Verify][OpInputOutputSize]This op:%s input size %zu is smaller than the ir needed input size %zu",
current_op_desc.GetName().c_str(), current_opdesc_inputs_num, ir_opdesc_inputs_num);
return PARAM_INVALID;
@@ -509,7 +509,7 @@ Status SingleOpParser::VerifyOpInputOutputSizeByIr(const OpDesc &current_op_desc
string reason = "is smaller than the ir needed output size " + std::to_string(ir_opdesc_outputs_num);
ErrorManager::GetInstance().ATCReportErrMessage("E19014", {"opname", "value", "reason"},
{current_op_desc.GetName(), "output size " + std::to_string(current_opdesc_outputs_num), reason});
GELOGE(PARAM_INVALID,
GELOGE(PARAM_INVALID,
"[Verify][OpInputOutputSize]This op:%s output size %zu is smaller than the ir needed output size %zu",
current_op_desc.GetName().c_str(), current_opdesc_outputs_num, ir_opdesc_outputs_num);
return PARAM_INVALID;
@@ -530,7 +530,7 @@ Status SingleOpParser::SetShapeRange(const std::string &op_name,
{op_name,
"shape",
"has unknown rank but dim size is not one"});
GELOGE(PARAM_INVALID, "[Set][ShapeRange]Invalid tensor shape:%s.",
GELOGE(PARAM_INVALID, "[Set][ShapeRange]Invalid tensor shape:%s.",
ge_tensor_desc.MutableShape().ToString().c_str());
return PARAM_INVALID;
}
@@ -572,7 +572,7 @@ Status SingleOpParser::SetShapeRange(const std::string &op_name,
{op_name,
"shape range " + std::to_string(range_index),
reason});
GELOGE(PARAM_INVALID, "[Set][ShapeRange]Invalid shape range entry. index = %zu, size = %zu",
GELOGE(PARAM_INVALID, "[Set][ShapeRange]Invalid shape range entry. index = %zu, size = %zu",
range_index, range.size());
return PARAM_INVALID;
}
@@ -628,7 +628,7 @@ Status SingleOpParser::ParseSingleOpList(const std::string &file, std::vector<Si
}

if (!Validate(single_op_desc)) {
GELOGE(PARAM_INVALID,
GELOGE(PARAM_INVALID,
"[Check][OpDesc]Validate the index[%d] of op failed when read json file[%s].", index, file.c_str());
return PARAM_INVALID;
}
@@ -645,8 +645,8 @@ Status SingleOpParser::ParseSingleOpList(const std::string &file, std::vector<Si
index += 1;
}
} catch (const nlohmann::json::exception &e) {
ErrorManager::GetInstance().ATCReportErrMessage("E10032", {"index", "jsonfile", "exception"},
{std::to_string(index), file, e.what()});
REPORT_INNER_ERROR("E19999", "parse singleop file:%s failed, catch exception:%s, current index:%d",
file.c_str(), e.what(), index);
GELOGE(PARAM_INVALID, "[Parse][OpList] the index:%d of op failed when read json file:%s, exception:%s",
index, file.c_str(), e.what());
return PARAM_INVALID;
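
A note on the reporting convention this file now follows: user-facing ATC messages (E10027/E10029/E10030) carry the op name as their first placeholder, while parse-time exceptions are routed to REPORT_INNER_ERROR (E19999) instead of an ATC message. A minimal sketch of the user-facing half, using a hypothetical wrapper; the helper name and its parameters are illustrative and not part of this patch:

// Hypothetical helper mirroring the updated E10027 call above.
static void ReportInvalidOutputFormat(const SingleOpDesc &op_desc, int index) {
  // the op name is now the first placeholder of the user-facing message
  ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"op_name", "input", "type", "index"},
                                                  {op_desc.op, "output", "format", std::to_string(index)});
  GELOGE(PARAM_INVALID, "[Check][Param]Output's format is invalid when the index is %d", index);
}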


+ 15  - 15   ge/session/omg.cc

@@ -627,7 +627,7 @@ Status ParseOutNodes(const string &out_nodes) {
// stoi: The method may throw an exception: invalid_argument/out_of_range
if (!CheckDigitStr(key_value_v[1])) {
ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
{"--out_nodes", out_nodes, "is not positive integer"});
{"--out_nodes", out_nodes, "index is not positive integer"});
GELOGE(PARAM_INVALID, "[Parse][Param]This str must be digit string, while the actual input is %s",
out_nodes.c_str());
return PARAM_INVALID;
@@ -939,7 +939,7 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertOm(const char *model_file, const char *js
int32_t priority = 0;

// Load model from file
Status ret = ModelParserBase::LoadFromFile(model_file, "", priority, model);
Status ret = ModelParserBase::LoadFromFile(model_file, priority, model);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "LoadFromFile failed, file:%s", model_file);
GELOGE(ret, "[Invoke][LoadFromFile] failed.");
@@ -955,7 +955,7 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertOm(const char *model_file, const char *js
OmFileLoadHelper omFileLoadHelper;
ge::graphStatus status = omFileLoadHelper.Init(model_data, model_len);
if (status != ge::GRAPH_SUCCESS) {
ErrorManager::GetInstance().ATCReportErrMessage("E19021", {"reason"}, {"Om file init failed"});
REPORT_CALL_ERROR("E19999", "Om file:%s init failed", model_file);
GELOGE(ge::FAILED, "[Invoke][Init]Om file init failed.");
if (model.model_data != nullptr) {
delete[] reinterpret_cast<char *>(model.model_data);
@@ -967,7 +967,7 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertOm(const char *model_file, const char *js
ModelPartition ir_part;
status = omFileLoadHelper.GetModelPartition(MODEL_DEF, ir_part);
if (status != ge::GRAPH_SUCCESS) {
ErrorManager::GetInstance().ATCReportErrMessage("E19021", {"reason"}, {"Get model part failed"});
REPORT_CALL_ERROR("E19999", "Get model part of om file:%s failed", model_file);
GELOGE(ge::FAILED, "[Get][ModelPartition] failed.");
if (model.model_data != nullptr) {
delete[] reinterpret_cast<char *>(model.model_data);
@@ -993,12 +993,12 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertOm(const char *model_file, const char *js
}
} else {
ret = INTERNAL_ERROR;
ErrorManager::GetInstance().ATCReportErrMessage("E19021", {"reason"}, {"ReadProtoFromArray failed"});
REPORT_CALL_ERROR("E19999", "ReadProtoFromArray failed for om file:%s", model_file);
GELOGE(ret, "[Read][Proto]From Array failed.");
}
} else {
ErrorManager::GetInstance().ATCReportErrMessage("E10003",
{"parameter", "value", "reason"}, {"om", model_file, "invalid om file"});
{"parameter", "value", "reason"}, {"om", model_file, "invalid om file, can't be parsed"});
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"[Parse][ModelContent] failed because of invalid om file. Please check --om param.");
}
@@ -1009,8 +1009,8 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertOm(const char *model_file, const char *js
}
return ret;
} catch (const std::exception &e) {
ErrorManager::GetInstance().ATCReportErrMessage("E19021", {"reason"},
{"Convert om model to json failed, exception message[" + std::string(e.what()) + "]"});
REPORT_INNER_ERROR("E19999", "Convert om model to json failed, exception message:%s, model_file:%s",
std::string(e.what()).c_str(), model_file);
GELOGE(FAILED, "[Save][Model]Convert om model to json failed, exception message : %s.", e.what());
return FAILED;
}
@@ -1021,7 +1021,7 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertPbtxtToJson(const char *model_file, const
// Mode 2 does not need to verify the priority, and a default value of 0 is passed
int32_t priority = 0;
// Load model from file
Status ret = ModelParserBase::LoadFromFile(model_file, "", priority, model);
Status ret = ModelParserBase::LoadFromFile(model_file, priority, model);
auto free_model_data = [](void **ptr) -> void {
if (ptr != nullptr && *ptr != nullptr) {
delete[] reinterpret_cast<char *>(*ptr);
@@ -1042,7 +1042,7 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertPbtxtToJson(const char *model_file, const

if (!flag) {
free_model_data(&model.model_data);
ErrorManager::GetInstance().ATCReportErrMessage("E19021", {"reason"}, {"ParseFromString failed"});
REPORT_CALL_ERROR("E19999", "ParseFromString failed for model_file:%s", model_file);
GELOGE(FAILED, "[Invoke][ParseFromString] failed.");
return FAILED;
}
@@ -1060,13 +1060,13 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertPbtxtToJson(const char *model_file, const
return SUCCESS;
} catch (google::protobuf::FatalException &e) {
free_model_data(&model.model_data);
ErrorManager::GetInstance().ATCReportErrMessage("E19021", {"reason"}, {"ParseFromString failed, exception message["
+ std::string(e.what()) + "]"});
REPORT_INNER_ERROR("E19999", "ParseFromString failed, exception message:%s, model_file:%s",
std::string(e.what()).c_str(), model_file);
GELOGE(FAILED, "[Invoke][ParseFromString] failed. exception message : %s", e.what());
return FAILED;
} catch (const std::exception &e) {
ErrorManager::GetInstance().ATCReportErrMessage("E19021", {"reason"},
{"Convert pbtxt to json failed, exception message[" + std::string(e.what()) + "]"});
REPORT_INNER_ERROR("E19999", "ParseFromString failed, exception message:%s, model_file:%s",
std::string(e.what()).c_str(), model_file);
GELOGE(FAILED, "[Save][pbtxt]Convert pbtxt to json failed, exception message : %s.", e.what());
return FAILED;
}
@@ -1086,7 +1086,7 @@ FMK_FUNC_HOST_VISIBILITY Status ConvertFwkModelToJson(const domi::FrameworkType

ErrorManager::GetInstance().ATCReportErrMessage(
"E10001", {"parameter", "value", "reason"},
{"--framework", std::to_string(framework), "only support 0(Caffe) 3(TensorFlow) 5(Onnx)"});
{"--framework", std::to_string(framework), "only support 0(Caffe) 3(TensorFlow) 5(Onnx) when model set 1"});
GELOGE(PARAM_INVALID, "[Check][Param]Input parameter[--framework] is mandatory "
"and it's value must be: 0(Caffe) 3(TensorFlow) or 5(Onnx).");
return PARAM_INVALID;
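
Both ConvertOm and ConvertPbtxtToJson above now call ModelParserBase::LoadFromFile without the second argument (previously an empty string, presumably a decryption-key path). A hedged sketch of the new call site, assuming model is the ModelData buffer declared elsewhere in this file; error handling is condensed and freeing model.model_data on failure is elided:

ge::ModelData model;
int32_t priority = 0;  // mode 2 does not verify priority, per the comment in ConvertPbtxtToJson
Status ret = ModelParserBase::LoadFromFile(model_file, priority, model);
if (ret != SUCCESS) {
  REPORT_CALL_ERROR("E19999", "LoadFromFile failed, file:%s", model_file);
  GELOGE(ret, "[Invoke][LoadFromFile] failed.");
  return ret;
}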


+ 2  - 2   ge/single_op/single_op_manager.cc

@@ -37,7 +37,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status SingleOpManager::GetOpFr
GELOGI("GetOpFromModel in. model name = %s, model id = %lu", model_name.c_str(), model_id);
if (single_op == nullptr) {
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Check][Param:single_op] is null.");
REPORT_INPUT_ERROR("E10412", std::vector<std::string>({"inputparam"}), std::vector<std::string>({"single_op"}));
REPORT_INNER_ERROR("E10412", "input param single_op is nullptr, check invalid");
return ACL_ERROR_GE_INTERNAL_ERROR;
}

@@ -156,7 +156,7 @@ Status SingleOpManager::GetResourceId(rtStream_t stream, uintptr_t &resource_id)
auto rt_err = rtCtxGetCurrent(&rt_cur_ctx);
if (rt_err != RT_ERROR_NONE) {
GELOGE(rt_err, "[Get][CurrentContext] failed, runtime result is %d", static_cast<int>(rt_err));
REPORT_CALL_ERROR("E19999",
REPORT_CALL_ERROR("E19999",
"GetResourceId failed because rtCtxGetCurrent result is %d", static_cast<int>(rt_err));
return RT_ERROR_TO_GE_STATUS(rt_err);
}


+ 2  - 0   inc/framework/common/profiling/ge_profiling.h

@@ -19,6 +19,7 @@

#include "ge/ge_api_error_codes.h"
#include "toolchain/prof_callback.h"
#include "runtime/base.h"

const int MAX_DEV_NUM = 64;

@@ -42,5 +43,6 @@ GE_FUNC_VISIBILITY ge::Status RegProfCtrlCallback(MsprofCtrlCallback func);
GE_FUNC_VISIBILITY ge::Status RegProfSetDeviceCallback(MsprofSetDeviceCallback func);
GE_FUNC_VISIBILITY ge::Status RegProfReporterCallback(MsprofReporterCallback func);
GE_FUNC_VISIBILITY ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t len);
GE_FUNC_VISIBILITY ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream);

#endif // INC_FRAMEWORK_COMMON_GE_PROFILING_H_
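
The newly exported ProfSetStepInfo takes a runtime stream, which is why "runtime/base.h" is now included for rtStream_t. A hedged usage sketch; the tag values for step begin/end are an assumption and are not documented by this header:

// Illustrative only: marks the start and end of one step on `stream`.
ge::Status MarkStep(uint64_t step_index, rtStream_t stream) {
  ge::Status ret = ProfSetStepInfo(step_index, 0, stream);  // assumed: tag 0 = step begin
  if (ret != ge::SUCCESS) {
    return ret;
  }
  // ... launch this iteration's tasks on `stream` ...
  return ProfSetStepInfo(step_index, 1, stream);            // assumed: tag 1 = step end
}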

+ 0  - 10   inc/framework/common/util.h

@@ -232,16 +232,6 @@ using google::protobuf::Message;
///
const int32_t DOMI_MAX_PATH_LEN = 256;

///
/// @ingroup domi_common
/// @brief proto file in binary format
/// @param [in] file path of proto file
/// @param [out] proto memory for storing the proto file
/// @return true success
/// @return false fail
///
GE_FUNC_VISIBILITY bool ReadProtoFromBinaryFile(const char *file, Message *proto);

///
/// @ingroup domi_common
/// @brief Reads the proto structure from an array.
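
Only the binary-file reader is dropped from this public header; the array-based reader whose doc comment begins above remains, and it is what ConvertOm uses after pulling the MODEL_DEF partition. A sketch under the assumption that ReadProtoFromArray keeps a (data, size, proto) signature and that ir_part exposes data/size members, as suggested by its use in omg.cc:

// ir_part is the ModelPartition returned by GetModelPartition(MODEL_DEF, ir_part) in omg.cc.
ge::proto::ModelDef model_def;
if (!ReadProtoFromArray(ir_part.data, static_cast<int>(ir_part.size), &model_def)) {
  REPORT_CALL_ERROR("E19999", "ReadProtoFromArray failed for om file:%s", model_file);
  GELOGE(INTERNAL_ERROR, "[Read][Proto]From Array failed.");
}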


+ 1  - 1   metadef

@@ -1 +1 @@
Subproject commit b27915cd37919430a61953f8998b7acce4a60177
Subproject commit c6030152c6dc05515115765babb5d64fde649df4

+ 1  - 1   parser

@@ -1 +1 @@
Subproject commit e75eda62de2b51a0bded5481ca81eb8fc7bf376e
Subproject commit 155d3262ba17f800094abb58b6a809b041cf0a74

+ 0  - 112   tests/ut/common/graph/testcase/ge_graph/ge_graph_anchor_unittest.cc

@@ -272,115 +272,3 @@ TEST_F(UtestGeAnchor, graph_utils_test) {
EXPECT_EQ(GraphUtils::RemoveEdge(conv_node->GetOutDataAnchor(0), bn_node->GetInControlAnchor()), GRAPH_SUCCESS);
EXPECT_EQ(GraphUtils::RemoveEdge(conv_node->GetOutDataAnchor(0), bn_node->GetInControlAnchor()), GRAPH_FAILED);
}

TEST_F(UtestGeAnchor, data_anchor_replace_peer) {
ComputeGraphPtr graph_ptr = std::make_shared<ComputeGraph>("graph");
OpDescPtr in_op_ptr = std::make_shared<OpDesc>("in_op_1", "float");
in_op_ptr->AddInputDesc("x1", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddInputDesc("x2", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddInputDesc("x3", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddOutputDesc("y1", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddOutputDesc("y2", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddOutputDesc("y3", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
NodePtr node1 = graph_ptr->AddNode(in_op_ptr);
NodePtr node2 = graph_ptr->AddNode(in_op_ptr);
NodePtr node3 = graph_ptr->AddNode(in_op_ptr);

OutDataAnchorPtr out_data_anchor = node1->GetOutDataAnchor(1);
InDataAnchorPtr in_data_anchor = node2->GetInDataAnchor(1);
EXPECT_EQ(out_data_anchor != nullptr, true);
EXPECT_EQ(in_data_anchor != nullptr, true);
EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(0)), GRAPH_SUCCESS);
EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(1)), GRAPH_SUCCESS);
EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(2)), GRAPH_SUCCESS);

size_t out_idx = 0;
for (; out_idx < out_data_anchor->peer_anchors_.size(); out_idx++) {
if (out_data_anchor->peer_anchors_[out_idx].lock() == in_data_anchor) {
break;
}
}
EXPECT_EQ(out_idx, 1);

size_t in_idx = 0;
for (; in_idx < in_data_anchor->peer_anchors_.size(); in_idx++) {
if (in_data_anchor->peer_anchors_[in_idx].lock() == out_data_anchor) {
break;
}
}
EXPECT_EQ(in_idx, 0);

out_data_anchor->ReplacePeer(in_data_anchor, node3->GetInDataAnchor(1), node3->GetOutDataAnchor(1));

size_t out_idx1 = 0;
for (; out_idx1 < out_data_anchor->peer_anchors_.size(); out_idx1++) {
if (out_data_anchor->peer_anchors_[out_idx1].lock() == node3->GetInDataAnchor(1)) {
break;
}
}
EXPECT_EQ(out_idx1, out_idx);

size_t in_idx1 = 0;
for (; in_idx1 < in_data_anchor->peer_anchors_.size(); in_idx1++) {
if (in_data_anchor->peer_anchors_[in_idx1].lock() == node3->GetOutDataAnchor(1)) {
break;
}
}
EXPECT_EQ(in_idx1, in_idx);
}

TEST_F(UtestGeAnchor, graph_utils_insert_node) {
ComputeGraphPtr graph_ptr = std::make_shared<ComputeGraph>("graph");
OpDescPtr in_op_ptr = std::make_shared<OpDesc>("in_op_1", "float");
in_op_ptr->AddInputDesc("x1", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddInputDesc("x2", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddInputDesc("x3", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddOutputDesc("y1", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddOutputDesc("y2", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
in_op_ptr->AddOutputDesc("y3", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW));
NodePtr node1 = graph_ptr->AddNode(in_op_ptr);
NodePtr node2 = graph_ptr->AddNode(in_op_ptr);
NodePtr node3 = graph_ptr->AddNode(in_op_ptr);

OutDataAnchorPtr out_data_anchor = node1->GetOutDataAnchor(1);
InDataAnchorPtr in_data_anchor = node2->GetInDataAnchor(1);
EXPECT_EQ(out_data_anchor != nullptr, true);
EXPECT_EQ(in_data_anchor != nullptr, true);
EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(0)), GRAPH_SUCCESS);
EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(1)), GRAPH_SUCCESS);
EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(2)), GRAPH_SUCCESS);

size_t out_idx = 0;
for (; out_idx < out_data_anchor->peer_anchors_.size(); out_idx++) {
if (out_data_anchor->peer_anchors_[out_idx].lock() == in_data_anchor) {
break;
}
}
EXPECT_EQ(out_idx, 1);

size_t in_idx = 0;
for (; in_idx < in_data_anchor->peer_anchors_.size(); in_idx++) {
if (in_data_anchor->peer_anchors_[in_idx].lock() == out_data_anchor) {
break;
}
}
EXPECT_EQ(in_idx, 0);

GraphUtils::InsertNodeBetweenDataAnchors(out_data_anchor, in_data_anchor, node3);

size_t out_idx1 = 0;
for (; out_idx1 < out_data_anchor->peer_anchors_.size(); out_idx1++) {
if (out_data_anchor->peer_anchors_[out_idx1].lock() == node3->GetInDataAnchor(0)) {
break;
}
}
EXPECT_EQ(out_idx1, out_idx);

size_t in_idx1 = 0;
for (; in_idx1 < in_data_anchor->peer_anchors_.size(); in_idx1++) {
if (in_data_anchor->peer_anchors_[in_idx1].lock() == node3->GetOutDataAnchor(0)) {
break;
}
}
EXPECT_EQ(in_idx1, in_idx);
}

+ 2  - 1   tests/ut/common/graph/testcase/ge_graph/ge_model_serialize_unittest.cc

@@ -30,6 +30,7 @@
#include "graph/model_serialize.h"

#include "graph/detail/model_serialize_imp.h"
#include "graph/node_impl.h"
#include "graph/ge_attr_value.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/tensor_utils.h"
@@ -1062,7 +1063,7 @@ TEST(UtestGeModelSerialize, test_model_serialize_imp_invalid_param) {

auto graph = std::make_shared<ComputeGraph>("test_graph");
auto node = graph->AddNode(std::make_shared<OpDesc>());
node->op_ = nullptr;
node->impl_->op_ = nullptr;
ge::proto::ModelDef model_def;
Model model;
model.SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));


+ 1  - 17   tests/ut/common/graph/testcase/ge_graph/ge_tensor_unittest.cc

@@ -25,6 +25,7 @@
#include "graph/ge_attr_value.h"
#include "graph/tensor.h"
#include "graph/utils/tensor_utils.h"
#include "graph/ge_tensor_impl.h"
#undef private
#undef protected

@@ -196,23 +197,6 @@ TEST_F(UtestGeTensor, test_shape_copy_move) {
EXPECT_EQ(shape4.GetDimNum(), 3);
}

TEST_F(UtestGeTensor, test_tensor_desc_invalid_null) {
GeTensorDesc tensor_desc(nullptr, nullptr);
EXPECT_EQ(tensor_desc.GetDataType(), DT_UNDEFINED);
EXPECT_EQ(tensor_desc.GetFormat(), FORMAT_RESERVED);
EXPECT_EQ(tensor_desc.MutableShape().shape_def_.GetProtoMsg(), nullptr);

GeTensorDesc tensor_desc2;
EXPECT_EQ(tensor_desc2.GetDataType(), DT_FLOAT);
EXPECT_EQ(tensor_desc2.GetFormat(), FORMAT_ND);

tensor_desc2.SetDataType(DT_DUAL_SUB_INT8);
EXPECT_EQ(tensor_desc2.GetDataType(), DT_DUAL_SUB_INT8);

TensorUtils::SetWeightSize(tensor_desc, 100);
EXPECT_EQ(TensorUtils::GetWeightSize(tensor_desc), 0);
}

TEST_F(UtestGeTensor, test_tensor_invalid_null) {
ProtoMsgOwner msg_owner;
GeTensor tensor(msg_owner, nullptr);


+ 1  - 0   tests/ut/ge/CMakeLists.txt

@@ -121,6 +121,7 @@ set(GRAPH_SRC_FILES
"${GE_CODE_DIR}/metadef/register/op_tiling.cpp"
"${GE_CODE_DIR}/metadef/graph/utils/tuning_utils.cc"
"${GE_CODE_DIR}/metadef/register/op_tiling_registry.cpp"
"${GE_CODE_DIR}/metadef/register/op_tiling_registry_impl.cpp"
)

set(PARSER_SRC_FILES


+ 4  - 17   tests/ut/ge/graph/manager/graph_manager_unittest.cc

@@ -579,29 +579,16 @@ TEST_F(UtestGraphManagerTest, test_prerunthread_failed_2) {
// }

TEST_F(UtestGraphManagerTest, ChangeAndDeleteConst_success) {
std::map<string, string> options_map;
options_map.insert({ge::RUN_FLAG, "0"});
ge::GetThreadLocalContext().SetGraphOption(options_map);

GraphId graph_id = 1;
GraphManager graph_manager;
graph_manager.options_.train_graph_flag = true;

auto graph = CreateGraphWithIsolatedConst();
Status status = graph_manager.ChangeConstType(graph);
EXPECT_EQ(status, ge::SUCCESS);
auto constant1 = graph->FindFirstNodeMatchType("Constant");
EXPECT_EQ(constant1, nullptr);

options_map.clear();
options_map.insert({ge::RUN_FLAG, "1"});
ge::GetThreadLocalContext().SetGraphOption(options_map);
status = graph_manager.ChangeConstType(graph);
EXPECT_EQ(status, ge::SUCCESS);
constant1 = graph->FindFirstNodeMatchType("Constant");
EXPECT_NE(constant1, nullptr);
graph_manager.ChangeConstTypeWhenTraining(graph);
auto const1 = graph->FindFirstNodeMatchType("Const");
EXPECT_EQ(const1, nullptr);

status = graph_manager.RemoveIsolatedConstInThisGraph(graph);
Status status = graph_manager.RemoveIsolatedConstInThisGraph(graph);
EXPECT_EQ(status, ge::SUCCESS);
auto all_nodes = graph->GetDirectNode();
EXPECT_EQ(all_nodes.size(), 3);
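
The rewritten test no longer toggles RUN_FLAG through thread-local graph options; it sets options_.train_graph_flag and calls ChangeConstTypeWhenTraining, after which no "Const" node is left. A rough approximation of the behaviour the assertions imply, inferred from this test only and not from graph_manager.cc:

// Sketch: in training graphs, "Const" nodes are re-typed so FindFirstNodeMatchType("Const") finds nothing.
void ChangeConstTypeWhenTrainingSketch(const ComputeGraphPtr &graph, bool train_graph_flag) {
  if (!train_graph_flag) {
    return;  // inference graphs keep their Const nodes untouched
  }
  for (auto &node : graph->GetDirectNode()) {
    if (node->GetType() == "Const") {
      node->GetOpDesc()->SetType("Constant");  // assumption based on the old test's Constant checks
    }
  }
}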


+ 155  - 44   tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc

@@ -20,9 +20,11 @@
#define protected public
#include "graph/partition/dynamic_shape_partition.h"
#include "compute_graph.h"
#include "graph/compute_graph_impl.h"
#include "inc/framework/common/types.h"
#include "utils/graph_utils.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/common/omg_util.h"

namespace ge {
namespace {
@@ -37,33 +39,33 @@ GeTensorDescPtr CreateTensorDesc(std::initializer_list<int64_t> shape, Format fo
}

class NodeBuilder {
public:
NodeBuilder(const std::string &name, const std::string &type) { op_desc_ = std::make_shared<OpDesc>(name, type); }
NodeBuilder &AddInputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
DataType data_type = DT_FLOAT) {
op_desc_->AddInputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
return *this;
}
NodeBuilder &AddOutputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
DataType data_type = DT_FLOAT) {
op_desc_->AddOutputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
return *this;
}
NodeBuilder &AddOutputDesc(GeTensorDescPtr tensor_desc) {
op_desc_->AddOutputDesc(tensor_desc->Clone());
return *this;
}
NodePtr Build(const ComputeGraphPtr &graph) {
NodePtr node = graph->AddNode(op_desc_);
return node;
}
private:
OpDescPtr op_desc_;
public:
NodeBuilder(const std::string &name, const std::string &type) { op_desc_ = std::make_shared<OpDesc>(name, type); }
NodeBuilder &AddInputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
DataType data_type = DT_FLOAT) {
op_desc_->AddInputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
return *this;
}
NodeBuilder &AddOutputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
DataType data_type = DT_FLOAT) {
op_desc_->AddOutputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
return *this;
}
NodeBuilder &AddOutputDesc(GeTensorDescPtr tensor_desc) {
op_desc_->AddOutputDesc(tensor_desc->Clone());
return *this;
}
NodePtr Build(const ComputeGraphPtr &graph) {
NodePtr node = graph->AddNode(op_desc_);
return node;
}
private:
OpDescPtr op_desc_;
};
} // namespace

@@ -92,28 +94,137 @@ TEST_F(UtestDynamicShapePartition, single_op_scene_success) {
EXPECT_EQ(partitioner.Partition(), SUCCESS);
}

/*******************************************************************************
* |
* Merge1
* Active / \ Active
* / \.
* / \.
* Merge2 \.
* Active/ \Active \.
* / \ \.
* Add Sub Relu
* | | |
* | | |
* Switch_f2 Switch_t2 |
* \ / |
* \ / |
* Less2 |
* | |
* | |
* Switch_f Switch_t
* | \ / |
* | Active |
* | | |
* | Less1 |
* | / \ |
* | / \ |
* Data Data
******************************************************************************/
TEST_F(UtestDynamicShapePartition, merge_control_flow_group) {
ComputeGraphPtr graph = std::make_shared<ComputeGraph>("default");
AttrUtils::SetStr(*graph, ATTR_NAME_SESSION_GRAPH_ID, "session_graph_id");

NodePtr data1 = NodeBuilder("data1", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
NodePtr data2 = NodeBuilder("data2", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
NodePtr merge = NodeBuilder("node2", MERGE).AddInputDesc({1}).AddInputDesc({1})
.AddOutputDesc({1}).AddOutputDesc({}).Build(graph);

GraphUtils::AddEdge(data1->GetOutDataAnchor(0), merge->GetInDataAnchor(0));
GraphUtils::AddEdge(data2->GetOutDataAnchor(0), merge->GetInDataAnchor(1));

(void)AttrUtils::SetBool(data1->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
(void)AttrUtils::SetInt(data1->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3);
(void)AttrUtils::SetBool(data2->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
(void)AttrUtils::SetInt(data2->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3);
(void)AttrUtils::SetBool(merge->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
(void)AttrUtils::SetInt(merge->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3);

EXPECT_EQ(graph->sub_graph_.size(), 0);
auto data1 = NodeBuilder("data1", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
auto data2 = NodeBuilder("data2", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);

auto less1 = NodeBuilder("less1", LESS).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
auto active1 = NodeBuilder("active1", STREAMACTIVE).Build(graph);
auto switch_t = NodeBuilder("switch_t", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph);
auto switch_f = NodeBuilder("switch_f", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph);
auto const_01 = NodeBuilder("const_01", CONSTANT).AddOutputDesc({1}).Build(graph);
auto const_11 = NodeBuilder("const_11", CONSTANT).AddOutputDesc({1}).Build(graph);


auto less2 = NodeBuilder("less2", LESS).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
auto active2 = NodeBuilder("active2", STREAMACTIVE).Build(graph);
auto switch_t2 = NodeBuilder("switch_t2", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph);
auto switch_f2 = NodeBuilder("switch_f2", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph);
auto const_02 = NodeBuilder("const_02", CONSTANT).AddOutputDesc({1}).Build(graph);
auto const_12 = NodeBuilder("const_12", CONSTANT).AddOutputDesc({1}).Build(graph);

auto add2 = NodeBuilder("add2", ADD).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
auto sub2 = NodeBuilder("sub2", SUB).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
auto merge2 = NodeBuilder("merge2", STREAMMERGE).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
auto active_f2 = NodeBuilder("active_f2", STREAMACTIVE).Build(graph);
auto active_t2 = NodeBuilder("active_t2", STREAMACTIVE).Build(graph);

auto relu1 = NodeBuilder("relu1", RELU).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
auto merge1 = NodeBuilder("merge1", STREAMMERGE).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
auto active_f1 = NodeBuilder("active_f1", STREAMACTIVE).Build(graph);
auto active_t1 = NodeBuilder("active_t1", STREAMACTIVE).Build(graph);

auto output1 = NodeBuilder("noutput1", NETOUTPUT).AddInputDesc({1}).Build(graph);

GraphUtils::AddEdge(data1->GetOutDataAnchor(0), less1->GetInDataAnchor(0));
GraphUtils::AddEdge(data2->GetOutDataAnchor(0), less1->GetInDataAnchor(1));
GraphUtils::AddEdge(less1->GetOutDataAnchor(0), switch_t->GetInDataAnchor(0));
GraphUtils::AddEdge(less1->GetOutDataAnchor(0), switch_f->GetInDataAnchor(0));
GraphUtils::AddEdge(const_01->GetOutDataAnchor(0), switch_t->GetInDataAnchor(1));
GraphUtils::AddEdge(const_11->GetOutDataAnchor(0), switch_f->GetInDataAnchor(1));
GraphUtils::AddEdge(less1->GetOutControlAnchor(), active1->GetInControlAnchor());
GraphUtils::AddEdge(active1->GetOutControlAnchor(), switch_t->GetInControlAnchor());
GraphUtils::AddEdge(active1->GetOutControlAnchor(), switch_f->GetInControlAnchor());


GraphUtils::AddEdge(data1->GetOutDataAnchor(0), less2->GetInDataAnchor(0));
GraphUtils::AddEdge(less1->GetOutDataAnchor(0), less2->GetInDataAnchor(1));
GraphUtils::AddEdge(less2->GetOutDataAnchor(0), switch_t2->GetInDataAnchor(0));
GraphUtils::AddEdge(less2->GetOutDataAnchor(0), switch_f2->GetInDataAnchor(0));
GraphUtils::AddEdge(const_02->GetOutDataAnchor(0), switch_t2->GetInDataAnchor(1));
GraphUtils::AddEdge(const_12->GetOutDataAnchor(0), switch_f2->GetInDataAnchor(1));
GraphUtils::AddEdge(less2->GetOutControlAnchor(), active2->GetInControlAnchor());
GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_t2->GetInControlAnchor());
GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_f2->GetInControlAnchor());


GraphUtils::AddEdge(switch_f2->GetOutControlAnchor(), add2->GetInControlAnchor());
GraphUtils::AddEdge(less2->GetOutDataAnchor(0), add2->GetInDataAnchor(0));
GraphUtils::AddEdge(add2->GetOutDataAnchor(0), merge2->GetInDataAnchor(0));
GraphUtils::AddEdge(add2->GetOutControlAnchor(), active_f2->GetInControlAnchor());
GraphUtils::AddEdge(active_f2->GetOutControlAnchor(), merge2->GetInControlAnchor());

GraphUtils::AddEdge(switch_t2->GetOutControlAnchor(), sub2->GetInControlAnchor());
GraphUtils::AddEdge(less2->GetOutDataAnchor(0), sub2->GetInDataAnchor(0));
GraphUtils::AddEdge(sub2->GetOutDataAnchor(0), merge2->GetInDataAnchor(1));
GraphUtils::AddEdge(sub2->GetOutControlAnchor(), active_t2->GetInControlAnchor());
GraphUtils::AddEdge(active_t2->GetOutControlAnchor(), merge2->GetInControlAnchor());

GraphUtils::AddEdge(switch_t->GetOutControlAnchor(), less2->GetInControlAnchor());
GraphUtils::AddEdge(switch_f->GetOutControlAnchor(), relu1->GetInControlAnchor());


GraphUtils::AddEdge(merge2->GetOutDataAnchor(0), merge1->GetInDataAnchor(0));
GraphUtils::AddEdge(merge2->GetOutControlAnchor(), active_f1->GetInControlAnchor());
GraphUtils::AddEdge(active_f1->GetOutControlAnchor(), merge1->GetInControlAnchor());

GraphUtils::AddEdge(data2->GetOutDataAnchor(0), relu1->GetInDataAnchor(1));
GraphUtils::AddEdge(relu1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0));
GraphUtils::AddEdge(relu1->GetOutControlAnchor(), active_t1->GetInControlAnchor());
GraphUtils::AddEdge(active_t1->GetOutControlAnchor(), merge1->GetInControlAnchor());

GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), output1->GetInDataAnchor(0));

AttrUtils::SetBool(merge2->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
EXPECT_EQ(graph->TopologicalSorting(), GRAPH_SUCCESS);

SetControlFlowGroup(merge2, merge2->GetOpDesc()->GetId());
SetControlFlowGroup(switch_f2, merge2->GetOpDesc()->GetId());
SetControlFlowGroup(switch_t2, merge2->GetOpDesc()->GetId());
SetControlFlowGroup(active2, merge2->GetOpDesc()->GetId());
SetControlFlowGroup(active_t2, merge2->GetOpDesc()->GetId());
SetControlFlowGroup(active_f2, merge2->GetOpDesc()->GetId());

SetControlFlowGroup(merge1, merge1->GetOpDesc()->GetId());
SetControlFlowGroup(switch_f, merge1->GetOpDesc()->GetId());
SetControlFlowGroup(switch_t, merge1->GetOpDesc()->GetId());
SetControlFlowGroup(active1, merge1->GetOpDesc()->GetId());
SetControlFlowGroup(active_f1, merge1->GetOpDesc()->GetId());
SetControlFlowGroup(active_t1, merge1->GetOpDesc()->GetId());

EXPECT_EQ(graph->impl_->sub_graph_.size(), 0);
DynamicShapePartitioner partitioner(graph);
EXPECT_EQ(partitioner.Partition(), SUCCESS);
EXPECT_EQ(graph->sub_graph_.size(), 1);
EXPECT_EQ(graph->impl_->sub_graph_.size(), 3); // input, less1, unknown
}
} // namespace ge
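
The rewritten partition test tags every node of the cond/branch structure with the id of its governing Merge through SetControlFlowGroup (declared in graph/common/omg_util.h, which the test now includes), where the old test set ATTR_NAME_CONTROL_FLOW_GROUP by hand. A rough equivalent of what that helper amounts to, inferred from the attribute the old test used directly:

// Sketch only; the real SetControlFlowGroup lives in GE's omg_util, not here.
void SetControlFlowGroupSketch(const NodePtr &node, int64_t group_id) {
  if (node == nullptr || node->GetOpDesc() == nullptr) {
    return;
  }
  (void)AttrUtils::SetInt(node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_id);
}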

+ 11  - 18   tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc

@@ -83,18 +83,14 @@ TEST_F(UtestExecutionEngine, ExecuteAsync_without_kernel_task) {
execution_context.profiling_level = 1;
SubgraphContext subgraph_context(nullptr, &execution_context);

NodeState node_state(*node_item, &subgraph_context);
auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context);
auto shared_task_context = std::shared_ptr<TaskContext>(task_context.release());
node_state.SetTaskContext(shared_task_context);

ExecutionEngine execution_engine;
ASSERT_TRUE(node_state.GetTaskContext() != nullptr);
auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get());
ASSERT_TRUE(node_state->GetTaskContext() != nullptr);

std::function<void()> callback;
SubgraphExecutor executor(hybrid_model.GetRootGraphItem(), &execution_context);
executor.InitCallback(&node_state, callback);
EXPECT_EQ(execution_engine.ExecuteAsync(node_state, node_state.GetTaskContext(), execution_context, callback), INTERNAL_ERROR);
executor.InitCallback(node_state.get(), callback);
ExecutionEngine execution_engine;
EXPECT_EQ(execution_engine.ExecuteAsync(*node_state, node_state->GetTaskContext(), execution_context, callback), INTERNAL_ERROR);
}

TEST_F(UtestExecutionEngine, ExecuteAsync_without_callback_and_kernel_task) {
@@ -118,21 +114,18 @@ TEST_F(UtestExecutionEngine, ExecuteAsync_without_callback_and_kernel_task) {
execution_context.model = &hybrid_model;
SubgraphContext subgraph_context(nullptr, &execution_context);

NodeState node_state(*node_item, &subgraph_context);
auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context);
auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get());
uint32_t task_id = 0;
uint32_t stream_id = 1;
std::string task_type = "rts";
uint32_t block_dim = 0;
task_context->SaveProfilingTaskDescInfo(task_id, stream_id, task_type, block_dim);
auto shared_task_context = std::shared_ptr<TaskContext>(task_context.release());
node_state.SetTaskContext(shared_task_context);
node_state->GetTaskContext()->SaveProfilingTaskDescInfo(task_id, stream_id, task_type, block_dim);

ExecutionEngine execution_engine;
ASSERT_TRUE(node_state.GetTaskContext() != nullptr);
ASSERT_TRUE(node_state->GetTaskContext() != nullptr);

std::function<void()> callback;
SubgraphExecutor executor(hybrid_model.GetRootGraphItem(), &execution_context);
executor.InitCallback(&node_state, callback);
EXPECT_EQ(execution_engine.ExecuteAsync(node_state, node_state.GetTaskContext(), execution_context, callback), INTERNAL_ERROR);
executor.InitCallback(node_state.get(), callback);
ExecutionEngine execution_engine;
EXPECT_EQ(execution_engine.ExecuteAsync(*node_state, node_state->GetTaskContext(), execution_context, callback), INTERNAL_ERROR);
}

+ 14  - 11   tests/ut/ge/hybrid/ge_hybrid_unittest.cc

@@ -42,6 +42,7 @@
#include "graph/utils/tensor_utils.h"
#include "graph/testcase/ge_graph/graph_builder_utils.h"
#include "single_op/task/build_task_utils.h"
#include "graph/op_desc_impl.h"
#undef private
#undef protected

@@ -161,11 +162,9 @@ TEST_F(UtestGeHybrid, task_update_tiling_info) {

GraphExecutionContext execution_context;
SubgraphContext subgraph_context(nullptr, &execution_context);
NodeState node_state(*node_item, &subgraph_context);
auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context);
ASSERT_TRUE(task_context != nullptr);
auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get());
ASSERT_EQ(aicore_task->InitTilingInfo(*op_desc), SUCCESS);
ASSERT_EQ(aicore_task->UpdateTilingInfo(*task_context), SUCCESS);
ASSERT_EQ(aicore_task->UpdateTilingInfo(*node_state->GetTaskContext()), SUCCESS);
}

TEST_F(UtestGeHybrid, index_taskdefs_failed) {
@@ -478,12 +477,14 @@ TEST_F(UtestGeHybrid, TestTaskContext) {
node_item->output_start = 0;

GraphExecutionContext execution_context;
SubgraphContext subgraph_context(nullptr, &execution_context);
GraphItem graph_item;
SubgraphContext subgraph_context(&graph_item, &execution_context);
ASSERT_EQ(subgraph_context.Init(), SUCCESS);
subgraph_context.all_inputs_.resize(2);
subgraph_context.all_outputs_.resize(1);

NodeState node_state(*node_item, &subgraph_context);
auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context);
auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get());
auto task_context = node_state->GetTaskContext();
ASSERT_TRUE(task_context != nullptr);
auto desc = task_context->MutableInputDesc(2);
ASSERT_TRUE(desc == nullptr);
@@ -523,12 +524,14 @@ TEST_F(UtestGeHybrid, hybrid_model_executor_update_args) {
node_item->output_start = 0;

GraphExecutionContext execution_context;
SubgraphContext subgraph_context(nullptr, &execution_context);
GraphItem graph_item;
SubgraphContext subgraph_context(&graph_item, &execution_context);
ASSERT_EQ(subgraph_context.Init(), SUCCESS);
subgraph_context.all_inputs_.resize(2);
subgraph_context.all_outputs_.resize(1);

NodeState node_state(*node_item, &subgraph_context);
auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context);
auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get());
auto task_context = node_state->GetTaskContext();

int32_t buffer[1];
aicore_task->tiling_buffer_ = TensorBuffer::Create(buffer, sizeof(buffer));
@@ -738,7 +741,7 @@ TEST_F(UtestGeHybrid, TestParseDependencies) {
std::vector<std::string> deps;
deps.push_back("Data");
auto op_desc = netoutput->GetOpDesc();
op_desc->input_name_idx_["Data"] = 0;
op_desc->impl_->input_name_idx_["Data"] = 0;
auto data_desc = data->GetOpDesc();
auto tensor = std::make_shared<GeTensor>();
auto tensor_desc = data_desc->MutableInputDesc(0);
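
Across these hybrid-executor tests the manual NodeState/TaskContext wiring (TaskContext::Create plus SetTaskContext) is gone: SubgraphContext is constructed with a real GraphItem, Init() is called, and GetOrCreateNodeState hands back a node state whose task context already exists. A condensed sketch of the shared setup, assuming node_item is prepared as in the elided parts of each test:

GraphExecutionContext execution_context;
GraphItem graph_item;
SubgraphContext subgraph_context(&graph_item, &execution_context);
ASSERT_EQ(subgraph_context.Init(), SUCCESS);
subgraph_context.all_inputs_.resize(2);   // sizes are test-specific
subgraph_context.all_outputs_.resize(1);

auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get());
auto task_context = node_state->GetTaskContext();
ASSERT_TRUE(task_context != nullptr);     // created by GetOrCreateNodeState, no SetTaskContext needed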


+ 0  - 5   tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc

@@ -97,11 +97,6 @@ TEST_F(UtestGeLocalNodeExecutor, test_no_op_task) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);

auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);

NodeTaskPtr task = nullptr;
GeLocalNodeExecutor node_executor;
ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS);


+ 3  - 14   tests/ut/ge/hybrid/node_executor/hccl/hccl_node_executor_unittest.cc

@@ -94,18 +94,17 @@ TEST_F(UtestHcclNodeExecutor, test_rdmatask_extract_tensor) {
tensor.SetData(data);
ctx->SetTensor(1, 0, tensor.Clone());
auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
vector<HcomRemoteAccessAddrInfo> addr_infos;
shared_ptr<RdmaNodeTask> task = MakeShared<RdmaNodeTask>();
task->remote_index_ = {1, 0};
ASSERT_EQ(task->ExtractTensor(*unique_task_context, addr_infos), PARAM_INVALID);
ASSERT_EQ(task->ExtractTensor(*node_state->GetTaskContext(), addr_infos), PARAM_INVALID);
Shape s2({1});
TensorDesc tensor_desc2(s2);
Tensor tensor2(tensor_desc2);
ctx->SetTensor(1, 0, tensor2.Clone());
task->ExtractTensor(*unique_task_context, addr_infos);
ASSERT_EQ(task->ExtractTensor(*unique_task_context, addr_infos), PARAM_INVALID);
task->ExtractTensor(*node_state->GetTaskContext(), addr_infos);
ASSERT_EQ(task->ExtractTensor(*node_state->GetTaskContext(), addr_infos), PARAM_INVALID);
RuntimeInferenceContext::DestroyContext(std::to_string(graph_context.context_id));
}
@@ -140,11 +139,6 @@ TEST_F(UtestHcclNodeExecutor, gatheralltoallv_execute) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);
auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);
for (int i=0; i<4; ++i) {
uint64_t value_0 = 512;
TensorValue in_tensor0(&value_0, sizeof(value_0));
@@ -206,11 +200,6 @@ TEST_F(UtestHcclNodeExecutor, alltoallv_execute) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);
auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);
for (int i=0; i<5; ++i) {
uint64_t value_0 = 512;
TensorValue in_tensor0(&value_0, sizeof(value_0));


+ 0  - 40   tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc

@@ -96,11 +96,6 @@ TEST_F(UtestRtsNodeTask, test_stream_switch_task) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);

auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);

uint64_t value_0 = 110;
uint64_t value_1 = 120;
TensorValue in_tensor0(&value_0, sizeof(value_0));
@@ -153,11 +148,6 @@ TEST_F(UtestRtsNodeTask, test_stream_active_task) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);

auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);

NodeTaskPtr task = nullptr;
RtsNodeExecutor node_executor;
ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS);
@@ -203,11 +193,6 @@ TEST_F(UtestRtsNodeTask, test_stream_merge_task) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);

auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);

uint64_t value_0 = 110;
TensorValue in_tensor0(&value_0, sizeof(value_0));
subgraph_context.SetInput(*node_item, 0, in_tensor0);
@@ -271,11 +256,6 @@ TEST_F(UtestRtsNodeTask, test_memcpy_async_task) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);

auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);

uint64_t value_0 = 110;
TensorValue in_tensor0(&value_0, sizeof(value_0));
subgraph_context.SetInput(*node_item, 0, in_tensor0);
@@ -328,11 +308,6 @@ TEST_F(UtestRtsNodeTask, test_pass_through_task) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);

auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);

uint64_t value_0 = 110;
TensorValue in_tensor0(&value_0, sizeof(value_0));
subgraph_context.SetInput(*node_item, 0, in_tensor0);
@@ -384,11 +359,6 @@ TEST_F(UtestRtsNodeTask, test_unsupport_label_set) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);

auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);

NodeTaskPtr task = nullptr;
RtsNodeExecutor node_executor;
ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS);
@@ -428,11 +398,6 @@ TEST_F(UtestRtsNodeTask, test_unsupport_label_goto) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);

auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);

NodeTaskPtr task = nullptr;
RtsNodeExecutor node_executor;
ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS);
@@ -472,11 +437,6 @@ TEST_F(UtestRtsNodeTask, test_unsupport_label_switch) {
auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
ASSERT_NE(node_state, nullptr);

auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context);
ASSERT_NE(unique_task_context, nullptr);
auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
node_state->SetTaskContext(shared_task_context);

NodeTaskPtr task = nullptr;
RtsNodeExecutor node_executor;
ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS);

