
Merge branch 'master' of gitee.com:liyihan123/graphengine

tags/v1.3.0
liyihan2@huawei.com 4 years ago
parent commit 48ae6fcdb2
80 changed files with 2291 additions and 222 deletions
  1. +0 -4      ge/common/dump/opdebug_register.cc
  2. +145 -4    ge/common/formats/format_transfers/format_transfer_fractal_z.cc
  3. +25 -8     ge/common/profiling/profiling_manager.cc
  4. +2 -1      ge/common/profiling/profiling_manager.h
  5. +31 -9     ge/generator/ge_generator.cc
  6. +37 -0     ge/graph/build/graph_builder.cc
  7. +10 -0     ge/graph/build/label_allocator.cc
  8. +9 -0      ge/graph/build/logical_stream_allocator.cc
  9. +4 -7      ge/graph/build/memory/block_mem_assigner.cc
  10. +2 -1     ge/graph/build/memory/graph_mem_assigner.cc
  11. +1 -0     ge/graph/build/memory/hybrid_mem_assigner.cc
  12. +17 -4    ge/graph/build/memory/var_mem_assign_util.cc
  13. +69 -3    ge/graph/build/model_builder.cc
  14. +22 -0    ge/graph/build/run_context.cc
  15. +65 -3    ge/graph/build/stream_allocator.cc
  16. +9 -0     ge/graph/build/stream_graph_optimizer.cc
  17. +83 -6    ge/graph/build/task_generator.cc
  18. +2 -0     ge/graph/common/bcast.cc
  19. +7 -0     ge/graph/common/bcast.h
  20. +18 -0    ge/graph/common/omg_util.cc
  21. +30 -0    ge/graph/execute/graph_execute.cc
  22. +16 -0    ge/graph/label/case_label_maker.cc
  23. +22 -0    ge/graph/label/if_label_maker.cc
  24. +30 -0    ge/graph/label/label_maker.cc
  25. +7 -0     ge/graph/label/partitioned_call_label_maker.cc
  26. +20 -0    ge/graph/label/while_label_maker.cc
  27. +8 -7     ge/graph/load/model_manager/davinci_model.cc
  28. +4 -0     ge/graph/load/model_manager/task_info/end_graph_task_info.cc
  29. +4 -0     ge/graph/load/model_manager/task_info/event_record_task_info.cc
  30. +7 -0     ge/graph/load/model_manager/task_info/event_wait_task_info.cc
  31. +3 -0     ge/graph/load/model_manager/task_info/fusion_start_task_info.cc
  32. +3 -0     ge/graph/load/model_manager/task_info/fusion_stop_task_info.cc
  33. +20 -0    ge/graph/load/model_manager/task_info/hccl_task_info.cc
  34. +67 -7    ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc
  35. +155 -2   ge/graph/load/model_manager/task_info/kernel_task_info.cc
  36. +12 -3    ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc
  37. +19 -9    ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc
  38. +1 -1     ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h
  39. +3 -3     ge/graph/load/model_manager/zero_copy_offset.h
  40. +6 -6     ge/graph/passes/atomic_addr_clean_pass.cc
  41. +0 -1     ge/graph/passes/attach_stream_label_pass.cc
  42. +7 -1     ge/graph/passes/pass_utils.cc
  43. +2 -0     ge/graph/passes/pass_utils.h
  44. +1 -1     ge/graph/passes/subexpression_migration_pass.cc
  45. +8 -2     ge/graph/passes/switch_dead_branch_elimination.cc
  46. +2 -0     ge/graph/passes/switch_to_stream_switch_pass.cc
  47. +22 -10   ge/graph/preprocess/graph_preprocess.cc
  48. +13 -12   ge/hybrid/executor/hybrid_model_async_executor.cc
  49. +159 -63  ge/hybrid/model/hybrid_model_builder.cc
  50. +7 -2     ge/hybrid/model/hybrid_model_builder.h
  51. +4 -0     ge/hybrid/model/node_item.cc
  52. +2 -0     ge/hybrid/model/node_item.h
  53. +23 -20   ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
  54. +5 -3     ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h
  55. +3 -1     ge/offline/main.cc
  56. +9 -1     inc/external/ge/ge_api_types.h
  57. +3 -0     inc/framework/common/debug/log.h
  58. +3 -2     inc/framework/common/ge_types.h
  59. +4 -0     inc/framework/generator/ge_generator.h
  60. +1 -1     metadef
  61. +1 -1     parser
  62. +6 -0     tests/depends/error_manager/src/error_manager_stub.cc
  63. +4 -0     tests/depends/runtime/src/runtime_stub.cc
  64. +1 -1     tests/ut/common/graph/CMakeLists.txt
  65. +6 -1     tests/ut/ge/CMakeLists.txt
  66. +234 -0   tests/ut/ge/common/format_transfer_hwcn_fractalz_unittest.cc
  67. +27 -0    tests/ut/ge/generator/ge_generator_unittest.cc
  68. +14 -0    tests/ut/ge/graph/build/mem_assigner_unittest.cc
  69. +146 -0   tests/ut/ge/graph/build/model_builder_unittest.cc
  70. +68 -0    tests/ut/ge/graph/build/task_generator_unittest.cc
  71. +48 -0    tests/ut/ge/graph/load/davinci_model_unittest.cc
  72. +25 -0    tests/ut/ge/graph/load/kernel_task_info_unittest.cc
  73. +65 -0    tests/ut/ge/graph/passes/atomic_addr_clean_pass_unittest.cc
  74. +163 -0   tests/ut/ge/graph/passes/switch_dead_branch_elimination_unittest.cc
  75. +30 -0    tests/ut/ge/graph/preprocess/graph_preprocess_unittest.cc
  76. +74 -2    tests/ut/ge/hybrid/ge_hybrid_unittest.cc
  77. +62 -0    tests/ut/ge/hybrid/known_node_executor_unittest.cc
  78. +21 -9    tests/ut/ge/profiling/ge_profiling_manager_unittest.cc
  79. +22 -0    third_party/fwkacllib/inc/runtime/stream.h
  80. +1 -0     third_party/fwkacllib/inc/toolchain/prof_callback.h

+0 -4  ge/common/dump/opdebug_register.cc

@@ -88,7 +88,6 @@ Status OpdebugRegister::RegisterDebugForStream(rtStream_t stream, uint32_t op_de

uint32_t debug_stream_id = 0;
uint32_t debug_task_id = 0;
#ifdef ONLY_COMPILE_OPEN_SRC
auto rt_ret = rtDebugRegisterForStream(stream, op_debug_mode, op_debug_addr_, &debug_stream_id, &debug_task_id);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "[Register][rtDebug]Failed in stream overflow, ret:0x%X, op_debug_mode:%u.",
@@ -97,7 +96,6 @@ Status OpdebugRegister::RegisterDebugForStream(rtStream_t stream, uint32_t op_de
rt_ret, op_debug_mode);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
#endif
GELOGD("debug_task_id:%u, debug_stream_id:%u in stream overflow.", debug_task_id, debug_stream_id);
data_dumper.SaveOpDebugId(debug_task_id, debug_stream_id, p2p_debug_addr_, true);
return SUCCESS;
@@ -105,7 +103,6 @@ Status OpdebugRegister::RegisterDebugForStream(rtStream_t stream, uint32_t op_de

void OpdebugRegister::UnregisterDebugForStream(rtStream_t stream) {
rtError_t rt_ret = RT_ERROR_NONE;
#ifdef ONLY_COMPILE_OPEN_SRC
if (stream != nullptr) {
GELOGD("start call rtDebugUnRegisterForStream in unknown shape over flow.");
rt_ret = rtDebugUnRegisterForStream(stream);
@@ -113,7 +110,6 @@ void OpdebugRegister::UnregisterDebugForStream(rtStream_t stream) {
GELOGW("rtDebugUnRegisterForStream failed, ret: 0x%X", rt_ret);
}
}
#endif

if (op_debug_addr_ != nullptr) {
rt_ret = rtFree(op_debug_addr_);


+145 -4  ge/common/formats/format_transfers/format_transfer_fractal_z.cc

@@ -29,6 +29,25 @@
namespace ge {
namespace formats {
namespace {
constexpr int64_t kDim = 1;
static int64_t Measure(int64_t x, int64_t y) {
int64_t z = y;
while (x % y != 0) {
z = x % y;
x = y;
y = z;
}
return z;
}
// least common multiple
static int64_t Lcm(int64_t a, int64_t b) {
if (b == 0) {
return -1;
}
int64_t temp = (a * b) / (Measure(a, b));
return temp;
}
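Despite its name, Measure above is Euclid's greatest-common-divisor loop, and Lcm derives the least common multiple from it (returning -1 when b is 0). A minimal standalone sketch of the same arithmetic, separate from the commit, with the expected values spelled out:

// Standalone illustration of the helpers added above (not part of the diff).
#include <cassert>
#include <cstdint>

static int64_t Measure(int64_t x, int64_t y) {  // Euclid's algorithm: returns gcd(x, y)
  int64_t z = y;
  while (x % y != 0) {
    z = x % y;
    x = y;
    y = z;
  }
  return z;
}

static int64_t Lcm(int64_t a, int64_t b) {  // least common multiple, -1 if b == 0
  if (b == 0) {
    return -1;
  }
  return (a * b) / Measure(a, b);
}

int main() {
  assert(Measure(16, 6) == 2);  // gcd(16, 6) = 2
  assert(Lcm(16, 6) == 48);     // 16 * 6 / 2 = 48
  return 0;
}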

Status CheckDataTypeSupport(DataType data_type) { return GetSizeByDataType(data_type) > 0 ? SUCCESS : UNSUPPORTED; }

/**
@@ -61,6 +80,35 @@ Status TransShapeToFz(int64_t n, int64_t c, int64_t h, int64_t w, DataType data_
return SUCCESS;
}

Status TransShapeToFzWithGroups(int64_t n, int64_t c, int64_t h, int64_t w, DataType data_type, std::vector<int64_t> &dst_shape,
int64_t groups) {
auto c0 = GetCubeSizeByDataType(data_type);
if (c0 < 0) {
return ACL_ERROR_GE_DATATYPE_INVALID;
}
int64_t cin_ori = c;
int64_t cout_ori = n / groups;
int64_t cube_k = GetCubeSizeByDataType(data_type);
int64_t e_mult = std::min(
Lcm(Lcm(cin_ori, cube_k) / (cin_ori), Lcm(cout_ori, static_cast<int64_t>(kCubeSize)) / (cout_ori)),
groups);
int64_t cin_opt = Ceil(e_mult * cin_ori, cube_k) * cube_k;
int64_t c1_dim = cin_opt / cube_k;
int64_t g_dim = Ceil(groups, e_mult);
auto n1 = Ceil(cout_ori * e_mult, static_cast<int64_t>(kCubeSize));
dst_shape.clear();
dst_shape.push_back(g_dim * c1_dim * h * w);
dst_shape.push_back(n1);
dst_shape.push_back(16);
dst_shape.push_back(cube_k);
if (!IsShapeValid(dst_shape)) {
GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "Failed to check dst shape %s",
ShapeToString(dst_shape).c_str());
return ACL_ERROR_GE_SHAPE_INVALID;
}
return SUCCESS;
}
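As a worked example of the shape computation above (assuming the usual cube size of 16 for both kCubeSize and cube_k): with groups = 2, c = 3, n = 16 (so cout_ori = 8) and h = w = 3, e_mult = min(Lcm(Lcm(3, 16) / 3, Lcm(8, 16) / 8), 2) = min(Lcm(16, 2), 2) = 2; cin_opt = Ceil(2 * 3, 16) * 16 = 16, so c1_dim = 1; g_dim = Ceil(2, 2) = 1; n1 = Ceil(8 * 2, 16) = 1; giving dst_shape = {1 * 1 * 3 * 3, 1, 16, 16} = {9, 1, 16, 16}.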

Status TransShapeNchwToFz(const std::vector<int64_t> &src_shape, DataType data_type, std::vector<int64_t> &dst_shape) {
if (!CheckShapeValid(src_shape, kNchwDimsNum)) {
return ACL_ERROR_GE_SHAPE_INVALID;
@@ -86,6 +134,21 @@ Status TransShapeHwcnToFz(const std::vector<int64_t> &src_shape, DataType data_t
return TransShapeToFz(n, c, h, w, data_type, dst_shape);
}

Status TransShapeHwcnToFzWithGroups(const std::vector<int64_t> &src_shape, DataType data_type, std::vector<int64_t> &dst_shape
, int64_t groups){
if (!CheckShapeValid(src_shape, kHwcnDimsNum)) {
return ACL_ERROR_GE_SHAPE_INVALID;
}

auto h = src_shape.at(kHwcnH);
auto w = src_shape.at(kHwcnW);
auto c = src_shape.at(kHwcnC);
auto n = src_shape.at(kHwcnN);

return TransShapeToFzWithGroups(n, c, h, w, data_type, dst_shape, groups);
}


Status TransShapeNhwcToFz(const std::vector<int64_t> &src_shape, DataType data_type, std::vector<int64_t> &dst_shape) {
if (!CheckShapeValid(src_shape, kNhwcDimsNum)) {
return ACL_ERROR_GE_SHAPE_INVALID;
@@ -189,6 +252,80 @@ Status TransFormatFromNchwToFz(const TransArgs &args, TransResult &result) {
return SUCCESS;
}

Status TransFormatHwcnToFzWithGroups(const TransArgs &args, TransResult &result, int64_t groups){
int64_t h_dim = args.src_shape[kHwcnH];
int64_t w_dim = args.src_shape[kHwcnW];
int64_t c_dim = args.src_shape[kHwcnC];
int64_t n_dim = args.src_shape[kHwcnN];
int64_t cin_ori = c_dim;
int64_t cout_ori = n_dim / groups;
if (cin_ori == 0 || cout_ori == 0) {
GELOGE(GRAPH_FAILED, "Cin_ori, cout_ori must not be equal 0, and current cin_ori, cout_ori,"
"groups are %ld %ld %ld",cin_ori, cout_ori, groups);
return GRAPH_FAILED;
}
const int64_t cube_k = GetCubeSizeByDataType(args.src_data_type);
int64_t e_mult = std::min(
Lcm(Lcm(cin_ori, cube_k) / (cin_ori), Lcm(cout_ori, static_cast<int64_t>(kCubeSize)) / (cout_ori)),
groups);
int64_t cin_opt = Ceil(e_mult * cin_ori, cube_k) * cube_k;
int64_t cout_opt = Ceil(e_mult * cout_ori, static_cast<int64_t>(kCubeSize)) * static_cast<int64_t>(kCubeSize);
int64_t c1_dim = cin_opt / cube_k;
int64_t g_dim = Ceil(groups, e_mult);
int64_t dim_cin = cin_opt / cube_k;
int64_t data_size = GetSizeByDataType(args.src_data_type);
int64_t size_output_data = g_dim * kDim * dim_cin * h_dim * w_dim * cout_opt * cube_k * data_size;
if (size_output_data == 0) {
result.length = static_cast<size_t>(size_output_data);
return SUCCESS;
}
errno_t ret = EOK;
std::shared_ptr<uint8_t> dst(new (std::nothrow) uint8_t[size_output_data], std::default_delete<uint8_t[]>());
if (dst == nullptr) {
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to trans format from %s to %s, can not alloc the memory for dst buf %ld",
TypeUtils::FormatToSerialString(args.src_format).c_str(),
TypeUtils::FormatToSerialString(args.dst_format).c_str(), size_output_data);
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
ret = memset_s(dst.get(), static_cast<size_t>(size_output_data), 0, static_cast<size_t>(size_output_data));
if (ret != EOK) {
GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "Failed to operate the dst memory, ret is %d", ret);
return ACL_ERROR_GE_MEMORY_OPERATE_FAILED;
}
for (int64_t g = 0; g < groups; g++) {
for (int64_t d = 0; d < kDim; d++) {
for (int64_t c = 0; c < c_dim; c++) {
for (int64_t h = 0; h < h_dim; h++) {
for (int64_t w = 0; w < w_dim; w++) {
for (int64_t n = 0; n < cout_ori; n++) {
int64_t e_val = g % e_mult;
int64_t dst_ci = e_val * cin_ori + c;
int64_t dst_co = e_val * cout_ori + n;
int64_t src_co = g * cout_ori + n;
int64_t tempory = dst_ci % cube_k;
int64_t srx_inx = 0;
int64_t dst_inx = (g / e_mult) * kDim * c1_dim * h_dim * w_dim * cout_opt * cube_k +
d * c1_dim * h_dim * w_dim * cout_opt * cube_k +
(dst_ci / cube_k) * h_dim * w_dim * cout_opt * cube_k +
h * w_dim * cout_opt * cube_k + w * cout_opt * cube_k +
dst_co * cube_k + tempory;
srx_inx = d * h_dim * w_dim * c_dim * n_dim + h * w_dim * c_dim * n_dim +
w * c_dim * n_dim + c * n_dim + src_co;
char *dst_data = reinterpret_cast<char *>(dst.get() + dst_inx * data_size);
const char *src_data = reinterpret_cast<const char *>(args.data + srx_inx * data_size);
for (int64_t index = 0; index < data_size; index++) {
*dst_data++ = *src_data++;
}
}
}
}
}
}
}
result.data = dst;
result.length = static_cast<size_t>(size_output_data);
return SUCCESS;
}
Status TransFormatHwcnToFz(const TransArgs &args, TransResult &result) {
int64_t h = args.src_shape[kHwcnH];
int64_t w = args.src_shape[kHwcnW];
@@ -363,15 +500,16 @@ Status FormatTransferFractalZ::TransFormat(const TransArgs &args, TransResult &r
if (args.src_format == FORMAT_NHWC && args.dst_format == FORMAT_FRACTAL_Z) {
return TransFormatNhwcToFz(args, result);
}

if (args.src_format == FORMAT_HWCN && args.dst_format == FORMAT_FRACTAL_Z) {
if ((args.src_format == FORMAT_HWCN) && (GetPrimaryFormat(args.dst_format) == FORMAT_FRACTAL_Z)) {
if (GetSubFormat(args.dst_format) > 1) {
return TransFormatHwcnToFzWithGroups(args, result, GetSubFormat(args.dst_format));
}
return TransFormatHwcnToFz(args, result);
}

if (args.src_format == FORMAT_NCHW && args.dst_format == FORMAT_FRACTAL_Z) {
return TransFormatFromNchwToFz(args, result);
}

return ACL_ERROR_GE_FORMAT_INVALID;
}

@@ -384,7 +522,10 @@ Status FormatTransferFractalZ::TransShape(Format src_format, const std::vector<i
if (src_format == FORMAT_NHWC && dst_format == FORMAT_FRACTAL_Z) {
return TransShapeNhwcToFz(src_shape, data_type, dst_shape);
}
if (src_format == FORMAT_HWCN && dst_format == FORMAT_FRACTAL_Z) {
if ((src_format == FORMAT_HWCN) && (GetPrimaryFormat(dst_format) == FORMAT_FRACTAL_Z)) {
if (GetSubFormat(dst_format) > 1) {
return TransShapeHwcnToFzWithGroups(src_shape, data_type, dst_shape, GetSubFormat(dst_format));
}
return TransShapeHwcnToFz(src_shape, data_type, dst_shape);
}
if (src_format == FORMAT_NCHW && dst_format == FORMAT_FRACTAL_Z) {

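Note on the dispatch change above: the groups value travels inside the destination format itself, so the checks now compare GetPrimaryFormat(dst_format) against FORMAT_FRACTAL_Z and, when GetSubFormat(dst_format) is greater than 1, route to the grouped HWCN-to-FRACTAL_Z converters with that sub-format as the group count; a plain FRACTAL_Z destination still takes the original ungrouped path.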

+25 -8  ge/common/profiling/profiling_manager.cc

@@ -31,7 +31,6 @@ const char *const kFpPoint = "fp_point";
const char *const kBpPoint = "bp_point";

#ifdef DAVINCI_SUPPORT_PROFILING
const size_t kReportMaxLen = 2048;
const int32_t kMaxDeviceNum = 256;
const uint32_t kInteval = 2;
const std::string kConfigNumsdev = "devNums";
@@ -293,21 +292,22 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportDa
ReporterData reporter_data{};
int ret = -1;
int32_t cb_ret = -1;
size_t index = data.size() / kReportMaxLen;
size_t report_max_len = reporter_max_len_;
size_t index = data.size() / report_max_len;
if (index >= 1) {
reporter_data.deviceId = device_id;
ret = memcpy_s(reporter_data.tag, MSPROF_ENGINE_MAX_TAG_LEN + 1, tag_name.c_str(), tag_name.size());
GE_IF_BOOL_EXEC(ret != EOK, GELOGE(ret, "Report data tag [%s] memcpy error!", tag_name.c_str()); return;);
for (size_t i = 0; i < index; ++i) {
reporter_data.data = (unsigned char *)data.c_str() + kReportMaxLen * i;
reporter_data.dataLen = kReportMaxLen;
reporter_data.data = (unsigned char *)data.c_str() + report_max_len * i;
reporter_data.dataLen = report_max_len;
cb_ret = CallMsprofReport(reporter_data);
GE_IF_BOOL_EXEC(cb_ret != 0, GELOGE(cb_ret, "Reporter data [%s] failed, ret:%d", tag_name.c_str(), cb_ret);
return;);
}
reporter_data.dataLen = data.size() - kReportMaxLen * index;
reporter_data.dataLen = data.size() - report_max_len * index;
if (reporter_data.dataLen != 0) {
reporter_data.data = (unsigned char *)data.c_str() + kReportMaxLen * index;
reporter_data.data = (unsigned char *)data.c_str() + report_max_len * index;
cb_ret = CallMsprofReport(reporter_data);
GE_IF_BOOL_EXEC(cb_ret != 0, GELOGE(cb_ret, "Reporter data [%s] failed, ret:%d", tag_name.c_str(), cb_ret);
return;);
@@ -745,15 +745,32 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ProfilingManager::Profilin
return execute_model_prof_on;
}

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::PluginInit() const {
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::PluginInit() {
if (prof_cb_.msprofReporterCallback == nullptr) {
GELOGE(ge::PARAM_INVALID, "MsprofReporterCallback callback is nullptr.");
return ge::PARAM_INVALID;
}
return prof_cb_.msprofReporterCallback(
int32_t cb_ret = prof_cb_.msprofReporterCallback(
static_cast<uint32_t>(MsprofReporterModuleId::MSPROF_MODULE_FRAMEWORK),
static_cast<uint32_t>(MsprofReporterCallbackType::MSPROF_REPORTER_INIT),
nullptr, 0);
if (cb_ret != MSPROF_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Profiling reporter init failed, ret = %d.", cb_ret);
GELOGE(INTERNAL_ERROR, "[Init][ProfilingReporter] profiling init failed, ret = %d.", cb_ret);
return INTERNAL_ERROR;
}

cb_ret = prof_cb_.msprofReporterCallback(
static_cast<uint32_t>(MsprofReporterModuleId::MSPROF_MODULE_FRAMEWORK),
static_cast<uint32_t>(MsprofReporterCallbackType::MSPROF_REPORTER_DATA_MAX_LEN),
&reporter_max_len_, sizeof(uint32_t));
if (cb_ret != MSPROF_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Get profiling reporter data max len failed, ret = %d.", cb_ret);
GELOGE(INTERNAL_ERROR, "[Init][ProfilingReporter] Get profiling reporter data max len failed, ret = %d.", cb_ret);
return INTERNAL_ERROR;
}

return SUCCESS;
}
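The PluginInit change above additionally queries the msprof reporter for MSPROF_REPORTER_DATA_MAX_LEN and stores the result in reporter_max_len_, which ReportData then uses in place of the removed fixed kReportMaxLen (2048) when slicing oversized payloads. A minimal standalone sketch of that chunking loop, with a hypothetical send callback standing in for CallMsprofReport:

// Standalone sketch of length-limited report chunking (illustration only).
#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

// Splits `data` into pieces no longer than `max_len` and hands each piece to `send`.
static void ReportInChunks(const std::string &data, size_t max_len,
                           const std::function<void(const char *, size_t)> &send) {
  size_t full_chunks = data.size() / max_len;
  for (size_t i = 0; i < full_chunks; ++i) {
    send(data.data() + i * max_len, max_len);
  }
  size_t tail = data.size() - full_chunks * max_len;
  if (tail != 0) {
    send(data.data() + full_chunks * max_len, tail);
  }
}

int main() {
  std::string payload(5000, 'x');
  ReportInChunks(payload, 2048, [](const char *, size_t len) {
    std::cout << "sent chunk of " << len << " bytes\n";  // prints 2048, 2048, 904
  });
  return 0;
}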

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::PluginUnInit() const {


+2 -1  ge/common/profiling/profiling_manager.h

@@ -88,7 +88,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager {
void ProfilingTaskDescInfo(uint32_t model_id, const std::vector<TaskDescInfo> &task_desc_info,
const int32_t &device_id);
void ProfilingOpInputOutInfo(const TaskDescInfo &task, Json &task_json);
Status PluginInit() const;
Status PluginInit();
void PluginUnInit() const;
Status CallMsprofReport(ReporterData &reporter_data) const;
struct MsprofCallback &GetMsprofCallback() { return prof_cb_; }
@@ -119,6 +119,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager {
MsprofCallback prof_cb_;
std::string fp_point_;
std::string bp_point_;
uint32_t reporter_max_len_ = 0;
};
} // namespace ge
#endif // GE_COMMON_PROFILING_PROFILING_MANAGER_H_

+31 -9  ge/generator/ge_generator.cc

@@ -154,7 +154,7 @@ static Status CheckEngineTypeSupport(const NodePtr &node, OpEngineType engine_ty
}

static Status AddInputs(const ComputeGraphPtr &graph, const NodePtr &node, const GeTensorDesc &tensor, int32_t index,
bool attr) {
bool attr, int32_t &data_index) {
GE_CHECK_NOTNULL_EXEC(graph, return PARAM_INVALID);
GE_CHECK_NOTNULL_EXEC(node, return PARAM_INVALID);

@@ -197,9 +197,10 @@ static Status AddInputs(const ComputeGraphPtr &graph, const NodePtr &node, const
"[Add][InputDesc]fail for node:%s", data_op->GetName().c_str());
GE_CHK_BOOL_EXEC(data_op->AddOutputDesc(tensor) == GRAPH_SUCCESS, return FAILED,
"[Add][OutputDesc]fail for node:%s", data_op->GetName().c_str());
if (attr) {
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(data_op, ATTR_NAME_INDEX, index), return FAILED,
if (attr && !is_const) {
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(data_op, ATTR_NAME_INDEX, data_index), return FAILED,
"[Set][Attr:%s]fail for node:%s", ATTR_NAME_INDEX.c_str(), data_op->GetName().c_str());
++data_index;
}

ge::NodePtr arg_node = graph->AddNode(data_op);
@@ -571,7 +572,7 @@ Status GeGenerator::SetModelNameForDump(const GeRootModelPtr &ge_root_model) {
if (ret != SUCCESS) {
GELOGE(FAILED, "[Check][IsUnknownShape]Check root model is unknown shape failed, model id:%u",
ge_root_model->GetModelId());
REPORT_CALL_ERROR("E19999", "Check root model is unknown shape failed, model id:%zu",
REPORT_CALL_ERROR("E19999", "Check root model is unknown shape failed, model id:%u",
ge_root_model->GetModelId());
return FAILED;
}
@@ -592,8 +593,6 @@ Status GeGenerator::SetModelNameForDump(const GeRootModelPtr &ge_root_model) {
ErrorManager::GetInstance().ATCReportErrMessage("E10000", {"parameter"}, {"output"});
GELOGE(FAILED, "[Check][GetModelNameStep]Get model_name failed. Param --output is invalid, root graph name: %s",
ge_root_model->GetRootGraph()->GetName().c_str());
REPORT_CALL_ERROR("E19999", "Get model_name failed. Param --output is invalid,",
"root graph name: %s", ge_root_model->GetRootGraph()->GetName().c_str());
return PARAM_INVALID;
}
map<string, GeModelPtr> name_to_ge_model = ge_root_model->GetSubgraphInstanceNameToModel();
@@ -709,6 +708,17 @@ bool GeGenerator::CheckNoAicore(const ComputeGraphPtr &graph) {
return true;
}

void GeGenerator::RemoveConst(const vector<GeTensor> &inputs, vector<GeTensor> &outputs) {
for (auto &input : inputs) {
GeTensorDesc input_desc = input.GetTensorDesc();
bool is_const = false;
(void)AttrUtils::GetBool(input_desc, CONST_ATTR_NAME_INPUT, is_const);
if (!is_const) {
outputs.emplace_back(input);
}
}
}
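RemoveConst above keeps only the inputs that are not tagged with CONST_ATTR_NAME_INPUT, so constant inputs folded into the single-op graph are no longer passed to BuildModel as data inputs. A minimal standalone sketch of that filtering idea, using a simplified stand-in type rather than the real ge::GeTensor (the real code reads the flag from the tensor's attributes):

// Standalone sketch of filtering out const-tagged inputs (illustration only).
#include <cassert>
#include <vector>

struct FakeTensor {      // stand-in for ge::GeTensor with a pre-resolved const flag
  bool is_const = false;
};

static void RemoveConst(const std::vector<FakeTensor> &inputs, std::vector<FakeTensor> &outputs) {
  for (const auto &input : inputs) {
    if (!input.is_const) {          // only non-const inputs become Data inputs
      outputs.emplace_back(input);
    }
  }
}

int main() {
  std::vector<FakeTensor> inputs = {{false}, {true}, {false}};
  std::vector<FakeTensor> data_inputs;
  RemoveConst(inputs, data_inputs);
  assert(data_inputs.size() == 2);  // the const-tagged input is dropped
  return 0;
}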

Status GeGenerator::CheckForSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs,
const vector<GeTensor> &outputs) {
GE_CHECK_NOTNULL_EXEC(op_desc, return PARAM_INVALID);
@@ -773,7 +783,9 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
GELOGI("ATC parser success in single op build.");

GeRootModelPtr ge_root_model = nullptr;
GE_CHK_STATUS_RET_NOLOG(impl_->BuildModel(graph, inputs, ge_root_model));
vector<GeTensor> data_inputs;
RemoveConst(inputs, data_inputs);
GE_CHK_STATUS_RET_NOLOG(impl_->BuildModel(graph, data_inputs, ge_root_model));
map<string, GeAttrValue> op_attrs = op_desc_tmp->GetAllAttrs();
GE_CHECK_NOTNULL(ge_root_model);
GE_CHECK_NOTNULL(ge_root_model->GetRootGraph());
@@ -832,9 +844,12 @@ Status GeGenerator::BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor
* @param [in] vector<GeTensor> &inputs: Operator input data description information.
* @param [in] vector<GeTensor> &outputs: Operator output data description information.
* @param [in] engine_type: specific engine.
* @param [in] compile_flag: op build flag, compile flag by acl
* @param [out] ModelBufferData &Model_buff: Model_buff: model buffer of the op.
* @return SUCCESS handle successfully / others handle failed
*/

// old process will be deleted
Status GeGenerator::BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor> &inputs,
const vector<GeTensor> &outputs, OpEngineType engine_type,
ModelBufferData &model_buff) {
@@ -845,6 +860,12 @@ Status GeGenerator::BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor
return status;
}

Status GeGenerator::BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor> &inputs,
const vector<GeTensor> &outputs, OpEngineType engine_type, int32_t compile_flag,
ModelBufferData &model_buff) {
return SUCCESS;
}

Status GeGenerator::BuildSingleOpGraph(OpDescPtr &op_desc, const vector<GeTensor> &inputs,
const vector<GeTensor> &outputs, std::string graph_name, Graph &graph) {
ge::ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>(graph_name);
@@ -856,18 +877,19 @@ Status GeGenerator::BuildSingleOpGraph(OpDescPtr &op_desc, const vector<GeTensor

// 2. Create InputData node.
int32_t arg_index = 0;
int32_t data_index = 0;
if (inputs.empty()) {
for (const auto &input_desc : op_desc->GetAllInputsDescPtr()) {
GE_CHECK_NOTNULL_EXEC(input_desc, return INTERNAL_ERROR);
if (!IsNeedConnectInputOpForSingleOp(*input_desc)) {
continue;
}
GE_CHK_STATUS_RET_NOLOG(AddInputs(compute_graph, op_node, *input_desc, arg_index, false));
GE_CHK_STATUS_RET_NOLOG(AddInputs(compute_graph, op_node, *input_desc, arg_index, false, data_index));
arg_index++;
}
} else {
for (const auto &in_desc : inputs) {
GE_CHK_STATUS_RET_NOLOG(AddInputs(compute_graph, op_node, in_desc.GetTensorDesc(), arg_index, true));
GE_CHK_STATUS_RET_NOLOG(AddInputs(compute_graph, op_node, in_desc.GetTensorDesc(), arg_index, true, data_index));
arg_index++;
}
}
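Together with the AddInputs change earlier in this file, BuildSingleOpGraph now keeps two counters: arg_index still numbers every input, while the new data_index only advances for non-const inputs and is the value written to ATTR_NAME_INDEX, so the Data nodes of a single-op graph stay contiguously indexed even when some inputs are constants. A minimal standalone sketch of the two-counter idea (illustration only, simplified away from the real AddInputs signature):

// Standalone sketch of separate argument and data indices (illustration only).
#include <cstdio>
#include <vector>

int main() {
  std::vector<bool> is_const = {false, true, false};  // three inputs, middle one const
  int arg_index = 0;
  int data_index = 0;
  for (bool c : is_const) {
    if (!c) {
      std::printf("input %d -> Data node ATTR_NAME_INDEX = %d\n", arg_index, data_index);
      ++data_index;               // only non-const inputs consume a data index
    }
    ++arg_index;                  // every input consumes an argument slot
  }
  return 0;
}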


+37 -0  ge/graph/build/graph_builder.cc

@@ -77,6 +77,8 @@ Status HandleSubgraphNode(NodePtr &src_node, OutDataAnchorPtr &src_out_anchor) {
Status HandleSubgraphDataNode(NodePtr &src_node, OutDataAnchorPtr &src_out_anchor) {
uint32_t index = 0;
if (!AttrUtils::GetInt(src_node->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, index)) {
REPORT_INNER_ERROR("E19999", "get attr:%s failed from node:%s when HandleSubgraphDataNode",
ATTR_NAME_PARENT_NODE_INDEX.c_str(), src_node->GetName().c_str());
GELOGE(FAILED, "Get attr ATTR_NAME_PARENT_NODE_INDEX failed, node:%s.", src_node->GetName().c_str());
return FAILED;
}
@@ -109,6 +111,8 @@ Status GraphBuilder::CalcOpParam(const ge::ComputeGraphPtr &graph) {
GE_CHECK_NOTNULL(graph);
auto instance_ptr = ge::GELib::GetInstance();
if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
REPORT_INNER_ERROR("E19999", "check gelib instance null when CalcOpParam for graph:%s",
graph->GetName().c_str());
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GraphBuilder: GE is not initialized");
return GE_CLI_GE_NOT_INITIALIZED;
}
@@ -121,6 +125,8 @@ Status GraphBuilder::CalcOpParam(const ge::ComputeGraphPtr &graph) {
(void)instance_ptr->DNNEngineManagerObj().GetDNNEngineName(node_ptr);
kernel_lib_name = node_ptr->GetOpDesc()->GetOpKernelLibName();
if (kernel_lib_name.empty()) {
REPORT_INNER_ERROR("E19999", "op kernel lib is empty in node:%s(%s) when CalcOpParam",
node_ptr->GetName().c_str(), node_ptr->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Get node:%s(%s) kernel lib failed.", node_ptr->GetName().c_str(),
node_ptr->GetType().c_str());
return INTERNAL_ERROR;
@@ -129,12 +135,16 @@ Status GraphBuilder::CalcOpParam(const ge::ComputeGraphPtr &graph) {

auto ret = SetInputSize(node_ptr);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set node:%s(%s) inputDesc size failed when CalcOpParam",
node_ptr->GetName().c_str(), node_ptr->GetType().c_str());
GELOGE(ret, "Set node inputDesc size failed, node name is %s", node_ptr->GetName().c_str());
return ret;
}

ret = OpsKernelBuilderManager::Instance().CalcOpRunningParam(*node_ptr);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call Calculate op:%s(%s) running param failed",
node_ptr->GetName().c_str(), node_ptr->GetType().c_str());
GELOGE(ret, "Calculate op running param failed, node name is %s", node_ptr->GetName().c_str());
return ret;
}
@@ -191,6 +201,7 @@ Status GraphBuilder::UpdateParentNodeOutputSize(const ge::ComputeGraphPtr &graph

Status GraphBuilder::Build(ComputeGraphPtr &comp_graph, GeRootModelPtr &ge_root_model_ptr, uint64_t session_id) {
if (comp_graph == nullptr) {
REPORT_INNER_ERROR("E19999", "check compute_graph nullptr when BuildGraph, session_id:%lu", session_id);
GELOGE(GE_GRAPH_PARAM_NULLPTR, "Graph build comp_graph is null.");
return GE_GRAPH_PARAM_NULLPTR;
}
@@ -302,6 +313,8 @@ Status GraphBuilder::SetConstantInputOffset(ComputeGraphPtr &comp_graph) {

std::vector<GeTensorPtr> weights = OpDescUtils::MutableWeights(peer_node);
if (weights.empty()) {
REPORT_INNER_ERROR("E19999", "check weights size of node %s(%s) is empty when SetConstantInputOffset",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "weights size of node %s is empty", node->GetName().c_str());
return FAILED;
}
@@ -393,6 +406,7 @@ static Status InsertMemcpyNode(const ComputeGraphPtr &graph, const OutDataAnchor
.Build();
(void)AttrUtils::SetBool(op_desc, ATTR_NO_NEED_CONSTANT_FOLDING, false);
if (GraphUtils::InsertNodeAfter(out_anchor, in_anchors, graph->AddNode(op_desc)) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Insert IDENTITY node %s after %s failed", name.c_str(), in_node->GetName().c_str());
GELOGE(FAILED, "Insert IDENTITY node %s after %s failed.", name.c_str(), in_node->GetName().c_str());
return FAILED;
}
@@ -423,6 +437,8 @@ static Status GenerateTaskForConstant(const std::shared_ptr<ComputeGraph> &graph
GELOGD("Insert MemcpyAsync node between %s and %s.", in_node->GetName().c_str(), node->GetName().c_str());
std::string name = node->GetName() + "_input_" + std::to_string(in_data_anchor->GetIdx()) + "_Memcpy";
if (InsertMemcpyNode(graph, peer_out_anchor, {in_data_anchor}, name) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Insert memcpy between %s and %s failed when GenerateTaskForConstant",
in_node->GetName().c_str(), node->GetName().c_str());
GELOGE(FAILED, "Insert memcpy between %s and %s failed.",
in_node->GetName().c_str(), node->GetName().c_str());
return FAILED;
@@ -470,6 +486,8 @@ Status GraphBuilder::MarkFpBpProfilingTaskAttr(ComputeGraphPtr &com_graph) {
GELOGI("The all reduce node of dynamic graph is %s, idx %u", op_desc->GetName().c_str(), node_index);
(void)ge::AttrUtils::SetBool(op_desc, ATTR_NAME_INSERT_BP_PROFILILNG_TASK, true);
GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(i, kProfilingArStep),
REPORT_INNER_ERROR("E19999", "Multiply result is out of range when calc profiling ar log id "
"for node:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Multiply result is out of range.");
return FAILED);
int64_t log_id = i * kProfilingArStep + kProfilingArStartLogid;
@@ -549,16 +567,19 @@ Status GraphBuilder::GetTaskInfo(const ge::ModelBuilder &builder, const ModelPtr

int64_t memory_size = 0;
if (!AttrUtils::GetInt(model_ptr, ATTR_MODEL_MEMORY_SIZE, memory_size)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail in model", ATTR_MODEL_MEMORY_SIZE.c_str());
GELOGE(INTERNAL_ERROR, "Get memory size fail.");
return INTERNAL_ERROR;
}
int64_t p2p_memory_size = 0;
if (!AttrUtils::GetInt(model_ptr, ATTR_MODEL_P2P_MEMORY_SIZE, p2p_memory_size)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail in model", ATTR_MODEL_P2P_MEMORY_SIZE.c_str());
GELOGE(INTERNAL_ERROR, "Get p2p memory size fail.");
return INTERNAL_ERROR;
}
int64_t weight_size = 0;
if (!AttrUtils::GetInt(model_ptr, ATTR_MODEL_WEIGHT_SIZE, weight_size)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail in model", ATTR_MODEL_WEIGHT_SIZE.c_str());
GELOGE(INTERNAL_ERROR, "Get weight memory size fail.");
return INTERNAL_ERROR;
}
@@ -668,6 +689,7 @@ Status GraphBuilder::SetInputSize(const ge::NodePtr &node_ptr) {
Status GraphBuilder::UpdateDataInputSize(const ge::NodePtr &node_ptr) {
const auto &op_desc = node_ptr->GetOpDesc();
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "check op_desc is nullptr when UpdateDataInputSize");
GELOGE(FAILED, "Op desc is nullptr.");
return FAILED;
}
@@ -685,6 +707,8 @@ Status GraphBuilder::UpdateDataInputSize(const ge::NodePtr &node_ptr) {
int64_t real_dim_size = 0;
ge::graphStatus graph_status = TensorUtils::GetTensorSizeInBytes(output_desc, real_dim_size);
if (graph_status != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get tensor size in bytes failed for op:%s(%s) index:0 when UpdateDataInputSize",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Get tensor size in bytes failed.");
return FAILED;
}
@@ -692,6 +716,8 @@ Status GraphBuilder::UpdateDataInputSize(const ge::NodePtr &node_ptr) {
ge::GeTensorDesc input_desc = op_desc->GetInputDesc(0);
ge::TensorUtils::SetSize(input_desc, real_dim_size);
if (op_desc->UpdateInputDesc(0, input_desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update input desc size failed for op:%s(%s) index:0 when UpdateDataInputSize",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Update input desc size failed.");
return FAILED;
}
@@ -720,6 +746,9 @@ Status GraphBuilder::CalcDynShapeRootGraphDataSize(const ge::OpDescPtr &op_desc)
int64_t real_dim_size = 0;
ge::graphStatus graph_status = TensorUtils::GetTensorSizeInBytes(output_desc, real_dim_size);
if (graph_status != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get tensor size in bytes failed for op:%s(%s) index:0 "
"when CalcDynShapeRootGraphDataSize",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Get tensor size in bytes failed.");
return FAILED;
}
@@ -727,6 +756,9 @@ Status GraphBuilder::CalcDynShapeRootGraphDataSize(const ge::OpDescPtr &op_desc)
ge::TensorUtils::SetSize(output_desc, real_dim_size);
GELOGI("Update dynamic shape graph data output size to [%ld].", real_dim_size);
if (op_desc->UpdateOutputDesc(0, output_desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update output desc size failed for op:%s(%s) index:0 "
"when CalcDynShapeRootGraphDataSize",
op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Update dynamic shape graph data output desc size failed.");
return FAILED;
}
@@ -744,6 +776,8 @@ Status GraphBuilder::SecondPartition(ge::ComputeGraphPtr &comp_graph) {
GE_CHK_STATUS_RET(ret, "Graph partition Failed.");
const auto &graph_2_subgraphlist = graph_partitioner_.GetSubGraphMap();
if (graph_2_subgraphlist.find(comp_graph) == graph_2_subgraphlist.end()) {
REPORT_INNER_ERROR("E19999", "find subgraphlis in graph:%s failed when SecondPartition",
comp_graph->GetName().c_str());
GELOGE(FAILED, "Find subgraph failed.");
return FAILED;
}
@@ -772,6 +806,9 @@ Status GraphBuilder::AddOutputMemTypeForNode(const NodePtr &node) {
mem_type);
if (!AttrUtils::SetInt(src_desc->MutableOutputDesc(src_out_anchor->GetIdx()), ATTR_OUTPUT_MEMORY_TYPE,
mem_type)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s for node:%s(%s) out_index:%u failed when AddOutputMemTypeForNode",
ATTR_OUTPUT_MEMORY_TYPE.c_str(), src_desc->GetName().c_str(), src_desc->GetType().c_str(),
src_out_anchor->GetIdx());
GELOGE(INTERNAL_ERROR, "Set out_memory_type attr for [%s:%d] failed.", src_desc->GetName().c_str(),
src_out_anchor->GetIdx());
return INTERNAL_ERROR;


+10 -0  ge/graph/build/label_allocator.cc

@@ -28,6 +28,7 @@ LabelAllocator::LabelAllocator(const ComputeGraphPtr &graph) : compute_graph_(gr

Status LabelAllocator::AssignFunctionalLabels() {
if (compute_graph_ == nullptr) {
REPORT_INNER_ERROR("E19999", "check param compute_graph nullptr when AssignFunctionalLabels");
GELOGE(INTERNAL_ERROR, "ComputeGraph not set, Assign labels failed.");
return INTERNAL_ERROR;
}
@@ -46,11 +47,15 @@ Status LabelAllocator::AssignFunctionalLabels() {
for (auto node : functional_nodes) {
LabelMakerPtr maker = LabelMakerFactory::Instance().Create(node->GetType(), compute_graph_, node);
if (maker == nullptr) {
REPORT_CALL_ERROR("E19999", "Check Node:%s(%s) label maker not registed when AssignFunctionalLabels",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Node: %s label maker not registed.", node->GetType().c_str());
return INTERNAL_ERROR;
}

if (maker->Run(label_index) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Node:%s(%s) run label maker failed when AssignFunctionalLabels",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(INTERNAL_ERROR, "Node: %s run label maker failed.", node->GetType().c_str());
return INTERNAL_ERROR;
}
@@ -63,6 +68,7 @@ Status LabelAllocator::AssignFunctionalLabels() {

bool LabelAllocator::CollectFunctionalNode(ComputeGraphPtr &graph, std::set<NodePtr> &functional_nodes) {
if (graph == nullptr) {
REPORT_INNER_ERROR("E19999", "check param compute_graph nullptr when CollectFunctionalNode");
GELOGE(INTERNAL_ERROR, "Sub ComputeGraph is null.");
return false;
}
@@ -74,12 +80,16 @@ bool LabelAllocator::CollectFunctionalNode(ComputeGraphPtr &graph, std::set<Node

NodePtr func_node = graph->GetParentNode();
if (func_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Parent node not set in node:%s(%s), graph:%s",
func_node->GetName().c_str(), func_node->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "Parent functional node not set: %s.", graph->GetName().c_str());
return false;
}

ComputeGraphPtr owner_graph = func_node->GetOwnerComputeGraph();
if (owner_graph == nullptr) {
REPORT_INNER_ERROR("E19999", "ComputeGraph owner not set in node:%s(%s), graph:%s",
func_node->GetName().c_str(), func_node->GetType().c_str(), graph->GetName().c_str());
GELOGE(INTERNAL_ERROR, "ComputeGraph owner not set: %s.", func_node->GetName().c_str());
return false;
}


+9 -0  ge/graph/build/logical_stream_allocator.cc

@@ -320,6 +320,8 @@ Status SingleStreamPass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr> &s
if (!HasAssignedStream(*subgraph)) {
const string &stream_label = subgraph->subgraph_info.GetStreamLabel();
if (!stream_label.empty()) {
REPORT_INNER_ERROR("E19999", "Stream labels are not supported in SingleStream mode "
"(subgraph: %s, stream label: %s)", subgraph->name.c_str(), stream_label.c_str());
GELOGE(INTERNAL_ERROR, "Stream labels are not supported (subgraph: %s, stream label: %s).",
subgraph->name.c_str(), stream_label.c_str());
return INTERNAL_ERROR;
@@ -337,6 +339,8 @@ Status NodeStreamUpdatePass::Run(ComputeGraphPtr graph, const vector<SubgraphPtr
const string &engine_name = subgraph->engine_conf.id;

if (!IsEngineSkip(*subgraph) && !HasAssignedStream(*subgraph)) {
REPORT_INNER_ERROR("E19999", "Subgraph %s has not yet been assigned a stream (engine: %s) "
" when run NodeStreamUpdatePass", subgraph->name.c_str(), engine_name.c_str());
GELOGE(INTERNAL_ERROR, "Subgraph %s has not yet been assigned a stream (engine: %s).", subgraph->name.c_str(),
engine_name.c_str());
return INTERNAL_ERROR;
@@ -636,6 +640,8 @@ Status LogicalStreamAllocator::DoAssign(const ComputeGraphPtr &graph, const Grap

auto iter = subgraph_map.find(graph);
if (iter == subgraph_map.end()) {
REPORT_INNER_ERROR("E19999", "Graph %s not found in subgraph_map when do logical stream assign ",
graph->GetName().c_str());
GELOGE(FAILED, "Graph %s not found.", graph->GetName().c_str());
return FAILED;
}
@@ -675,6 +681,8 @@ Status LogicalStreamAllocator::ConvertSubgraphs(const vector<SubGraphInfoPtr> &s
const string &engine_name = subgraph_info->GetEngineName();
auto engine_conf_iter = engine_confs.find(engine_name);
if ((engine_conf_iter == engine_confs.end()) || (engine_conf_iter->second == nullptr)) {
REPORT_INNER_ERROR("E19999", "Engine conf of subgraph %s not found (engine name: %s) when ConvertSubgraphs",
subgraph_name.c_str(), engine_name.c_str());
GELOGE(INTERNAL_ERROR, "Engine conf of subgraph %s not found (engine name: %s).", subgraph_name.c_str(),
engine_name.c_str());

@@ -722,6 +730,7 @@ Status LogicalStreamAllocator::RunPasses(const ComputeGraphPtr &graph, const vec
} else if (status == NOT_CHANGED) {
GELOGD("[Show][Status]Stream pass %s return NOT_CHANGED.", pass->GetName().c_str());
} else {
REPORT_CALL_ERROR("E19999", "Stream pass %s run failed.", pass->GetName().c_str());
GELOGE(status, "Stream pass %s failed.", pass->GetName().c_str());
return status;
}


+4 -7  ge/graph/build/memory/block_mem_assigner.cc

@@ -430,17 +430,14 @@ void SetLastUsedInputMemAttr(NodePtr &node, int input_index) {
}
auto node_op_desc = node->GetOpDesc();
if (node_op_desc != nullptr) {
auto input_desc = node_op_desc->GetInputDesc(input_index);
if (!ge::AttrUtils::SetInt(input_desc, ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE, true)) {
auto input_desc = node_op_desc->MutableInputDesc(input_index);
if (!ge::AttrUtils::SetInt(*input_desc, ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE, true)) {
GELOGW("Set %s input[%d] ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE to true failed.", node_op_desc->GetName().c_str(),
input_index);
return;
}
GELOGD("Set %s input[%d] ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE to true success.", node_op_desc->GetName().c_str(),
input_index);
if (node_op_desc->UpdateInputDesc(input_index, input_desc) != GRAPH_SUCCESS) {
GELOGW("Update %s input[%d] desc failed.", node_op_desc->GetName().c_str(), input_index);
}
}
}
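The change above replaces GetInputDesc, which returns a copy that would have to be written back with UpdateInputDesc, with MutableInputDesc, which returns a pointer to the descriptor stored in the OpDesc, so the lifecycle attribute is set directly on the stored tensor desc and the separate UpdateInputDesc call goes away. A minimal standalone sketch of the copy-versus-pointer distinction, using simplified stand-in types rather than the real GE classes:

// Standalone sketch: an attribute set on a copy is lost; set through the stored pointer it sticks.
#include <cassert>
#include <memory>
#include <vector>

struct Desc { bool end_of_lifecycle = false; };

struct Op {
  std::vector<std::shared_ptr<Desc>> inputs{std::make_shared<Desc>()};
  Desc GetInputDesc(size_t i) const { return *inputs[i]; }                       // returns a copy
  std::shared_ptr<Desc> MutableInputDesc(size_t i) const { return inputs[i]; }   // returns stored desc
};

int main() {
  Op op;
  Desc copy = op.GetInputDesc(0);
  copy.end_of_lifecycle = true;                      // modifies the copy only
  assert(!op.inputs[0]->end_of_lifecycle);

  op.MutableInputDesc(0)->end_of_lifecycle = true;   // modifies the stored descriptor
  assert(op.inputs[0]->end_of_lifecycle);
  return 0;
}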

@@ -593,9 +590,9 @@ void BlockMemAssigner::GetOutAndWorkSpaceMem(vector<int64_t> &all_memory_size) {
}

for (auto &out_anchor : n->GetAllOutDataAnchors()) {
GeTensorDesc output_desc = node_op_desc->GetOutputDesc(out_anchor->GetIdx());
auto output_desc = node_op_desc->GetOutputDescPtr(out_anchor->GetIdx());
int64_t size = 0;
GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(output_desc, size) != SUCCESS, GELOGI("Get size failed"));
GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(*output_desc, size) != SUCCESS, GELOGI("Get size failed"));
GE_IF_BOOL_EXEC(size < 0,
GELOGE(FAILED, "[Check][TensorSize]tensor_size:%ld is invalid, "
"maybe it is unknown shape node, Node_name:%s",


+2 -1  ge/graph/build/memory/graph_mem_assigner.cc

@@ -482,7 +482,7 @@ Status GraphMemoryAssigner::ReAssignContinuousMemory(bool is_loop_graph) {
"[Assign][Memory:Continuous:Input]fail for node:%s.", node->GetName().c_str())
}
for (auto pair : memory_offset_) {
GELOGD("[Reassign][Memory:Continuous]At last, memory type = %ld, mem offset = %zu.", pair.first,
GELOGD("[Reassign][Memory:Continuous]At last, memory type = %ld, mem offset = %zu", pair.first,
pair.second.mem_offset_);
}
return ge::SUCCESS;
@@ -1215,6 +1215,7 @@ Status GraphMemoryAssigner::CheckOffset() {
std::map<std::string, std::string> anchor_to_symbol;
std::map<std::string, std::list<NodeIndexIO>> symbol_to_anchors;
if (GraphUtils::GetRefMapping(compute_graph_, symbol_to_anchors, anchor_to_symbol) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get ref-mapping for graph %s failed", compute_graph_->GetName().c_str());
GELOGE(FAILED, "[Get][RefMapping]fail for graph %s", compute_graph_->GetName().c_str());
return FAILED;
}


+1 -0  ge/graph/build/memory/hybrid_mem_assigner.cc

@@ -42,6 +42,7 @@ Status HybridMemAssigner::AssignMemory(std::unique_ptr<BlockMemAssigner> &block_

Status HybridMemAssigner::Assign() {
if (GraphUtils::GetRefMapping(compute_graph_, symbol_to_anchors_, anchor_to_symbol_) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get ref-mapping for graph %s failed", compute_graph_->GetName().c_str());
GELOGE(FAILED, "Get ref-mapping for graph %s failed.", compute_graph_->GetName().c_str());
return FAILED;
}


+17 -4  ge/graph/build/memory/var_mem_assign_util.cc

@@ -53,6 +53,8 @@ Status VarMemAssignUtil::AssignStaticMemory2Node(ge::ComputeGraphPtr &compute_gr
GE_IF_BOOL_EXEC(ge::AttrUtils::GetStr(n->GetOpDesc(), REF_VAR_SRC_VAR_NAME, ref_var_src_var_name), continue);
string node_name = n->GetName();
GE_IF_BOOL_EXEC(n->GetOpDesc()->GetAllOutputsDesc().empty(),
REPORT_INNER_ERROR("E19999", "check node:%s has no OutputDesc when AssignStaticMemory2Node",
n->GetName().c_str());
GELOGE(FAILED, "node:%s has no OutputDesc.", n->GetName().c_str());
return FAILED);
ge::ConstGeTensorDescPtr tensor_desc = n->GetOpDesc()->GetOutputDescPtr(0);
@@ -116,6 +118,8 @@ Status VarMemAssignUtil::SetOutVariableAttr(const ge::NodePtr &node, const ge::N
GE_CHECK_NOTNULL(node->GetOpDesc());
output_list = node->GetOpDesc()->GetOutputOffset();
if (output_list.empty()) {
REPORT_INNER_ERROR("E19999", "check node:%s output_offset_list is empty when SetOutVariableAttr",
node->GetName().c_str());
GELOGE(PARAM_INVALID, "Output_list is empty");
return PARAM_INVALID;
}
@@ -126,7 +130,12 @@ Status VarMemAssignUtil::SetOutVariableAttr(const ge::NodePtr &node, const ge::N
VarManager::Instance(session_id)->GetVarAddr(var_node->GetName(), var_tensor_desc, &dev_ptr, memory_type));

int out_list_size = static_cast<int>(output_list.size());
GE_CHK_BOOL_RET_STATUS(index < out_list_size, FAILED, "index %d >= output_list.size() %d", index, out_list_size);
if (index >= out_list_size) {
REPORT_INNER_ERROR("E19999", "param index:%d >= output_list.size() %d in node %s, "
"check invalid when SetOutVariableAttr", index, out_list_size, node->GetName().c_str());
GELOGE(FAILED, "index %d >= output_list.size() %d", index, out_list_size);
return FAILED;
}

output_list[index] = static_cast<int64_t>(reinterpret_cast<intptr_t>(dev_ptr));
GELOGI("Assign node outputOffset[index] is: %ld", output_list[index]);
@@ -168,9 +177,13 @@ Status VarMemAssignUtil::DealBroadCastNode(uint32_t graph_id, const ge::NodePtr

auto broad_cast_index = static_cast<size_t>(broad_cast_info.idx);
auto input_tensor_desc_ptr_vistor = op_desc->GetAllInputsDescPtr();
GE_CHK_BOOL_RET_STATUS(input_tensor_desc_ptr_vistor.size() > broad_cast_index, FAILED,
"Get broadcast op %s input tensor desc size [%zu] < idx [%d]", node->GetName().c_str(),
input_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
if (input_tensor_desc_ptr_vistor.size() <= broad_cast_index) {
REPORT_INNER_ERROR("E19999", "Get broadcast op %s input tensor desc size [%zu] < idx [%d]",
node->GetName().c_str(), input_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
GELOGE(FAILED, "Get broadcast op %s input tensor desc size [%zu] < idx [%d]", node->GetName().c_str(),
input_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
return FAILED;
}
const ge::GeTensorDescPtr input_tensor_desc =
input_tensor_desc_ptr_vistor.at(static_cast<size_t>(broad_cast_info.idx));
int64_t input_size = 0;


+69 -3  ge/graph/build/model_builder.cc

@@ -116,11 +116,15 @@ Status ModelBuilder::CalcOutputSize(const ge::NodePtr &n) {
int64_t size_temp = 0;
graphStatus graph_status = TensorUtils::GetTensorMemorySizeInBytes(desc_temp, size_temp);
if (graph_status != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Get tensor size in bytes failed for op:%s(%s) index:%u when CalcOutputSize",
node_op_desc->GetName().c_str(), node_op_desc->GetType().c_str(), index);
GELOGE(graph_status, "GetTensorMemorySizeInBytes failed!");
return FAILED;
}
TensorUtils::SetSize(desc_temp, size_temp);
if (node_op_desc->UpdateOutputDesc(index, desc_temp) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update Output desc size failed for op:%s(%s) index:%u when CalcOutputSize",
node_op_desc->GetName().c_str(), node_op_desc->GetType().c_str(), index);
GELOGE(FAILED, "UpdateOutputDesc failed.");
return FAILED;
}
@@ -197,8 +201,7 @@ void ModelBuilder::SetInputIsConst(const ge::NodePtr &n) {
}
}

std::string input_const_info = ToString(is_input_const);
GELOGD("update opdesc:%s InputConst:%s", node_op_desc->GetName().c_str(), input_const_info.c_str());
GELOGD("update opdesc:%s InputConst:%s", node_op_desc->GetName().c_str(), ToString(is_input_const).c_str());
node_op_desc->SetIsInputConst(is_input_const);
}

@@ -207,11 +210,15 @@ Status ModelBuilder::AdjustConstWeightSize(const ge::NodePtr &node, size_t &mem_
if (node->GetType() == CONSTANT) {
vector<GeTensorPtr> weights = OpDescUtils::MutableWeights(node);
if (weights.empty()) {
REPORT_INNER_ERROR("E19999", "Check weights size of node %s(%s) is empty when AdjustConstWeightSize",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "weights size of node %s is empty", node->GetName().c_str());
return FAILED;
}
GeTensorPtr weight = weights[0];
if (weight == nullptr) {
REPORT_INNER_ERROR("E19999", "Check weight of node %s(%s) is nullptr when AdjustConstWeightSize",
node->GetName().c_str(), node->GetType().c_str());
GELOGE(FAILED, "weights[0] is null.");
return FAILED;
}
@@ -353,6 +360,9 @@ Status ModelBuilder::AdjustInputTensorFlag() {
auto input_desc = owner_node_op_desc->GetInputDesc(in_anchors->GetIdx());
ge::TensorUtils::SetInputTensor(input_desc, true);
if (owner_node_op_desc->UpdateInputDesc(in_anchors->GetIdx(), input_desc) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Update Input desc size failed for op:%s(%s) index:%u when %s",
owner_node_op_desc->GetName().c_str(), owner_node_op_desc->GetType().c_str(),
in_anchors->GetIdx(), __FUNCTION__);
GELOGE(FAILED, "UpdateOutputDesc failed.");
return FAILED;
}
@@ -381,33 +391,51 @@ Status ModelBuilder::BuildModelDef(ge::Model &model) {

max_mem_offset_ = mem_type_to_mem_offset_[RT_MEMORY_HBM];
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_MEMORY_SIZE, max_mem_offset_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_MEMORY_SIZE.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt of ATTR_MODEL_MEMORY_SIZE failed.");
return FAILED);
if (mem_type_to_mem_offset_.find(RT_MEMORY_P2P_DDR) != mem_type_to_mem_offset_.end()) {
p2p_mem_offset_ = mem_type_to_mem_offset_[RT_MEMORY_P2P_DDR];
}
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_P2P_MEMORY_SIZE, p2p_mem_offset_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_P2P_MEMORY_SIZE.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt of ATTR_MODEL_P2P_MEMORY_SIZE failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_WEIGHT_SIZE, weight_offset_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_WEIGHT_SIZE.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt of ATTR_MODEL_WEIGHT_SIZE failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_STREAM_NUM, stream_num_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_STREAM_NUM.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt of ATTR_MODEL_STREAM_NUM failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_EVENT_NUM, event_num_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_EVENT_NUM.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt of ATTR_MODEL_EVENT_NUM failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(&model, ATTR_MODEL_HUGE_STREAM_LIST, huge_streams_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_HUGE_STREAM_LIST.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt of ATTR_MODEL_HUGE_STREAM_LIST failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_LABEL_NUM, label_num_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_LABEL_NUM.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt of ATTR_MODEL_LABEL_NUM failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_ZERO_COPY_MEMORY_SIZE, zero_copy_mem_size_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_ZERO_COPY_MEMORY_SIZE.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt of ATTR_MODEL_ZERO_COPY_MEMORY_SIZE failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, ATTR_MODEL_OUT_NODES_NAME, GetLocalOmgContext().net_out_nodes),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_OUT_NODES_NAME.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetListStr of ATTR_MODEL_OUT_NODES_NAME failed.");
return FAILED);
GELOGI("For model, max_mem_offset_: %zu, p2p_mem_size: %zu, zero_copy_mem_size_: %zu", max_mem_offset_,
@@ -415,6 +443,8 @@ Status ModelBuilder::BuildModelDef(ge::Model &model) {
string fp_ceiling_mode;
if (ge::GetContext().GetOption("ge.fpCeilingMode", fp_ceiling_mode) == SUCCESS) {
if (!ge::AttrUtils::SetStr(&model, ATTR_FP_CEILING_MODE, fp_ceiling_mode)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_FP_CEILING_MODE.c_str(), __FUNCTION__);
GELOGE(FAILED, "Failed to set attr ATTR_FP_CEILING_MODE");
return FAILED;
}
@@ -429,22 +459,30 @@ Status ModelBuilder::BuildModelDef(ge::Model &model) {
int64_t core_type = (ge_core_type == kVectorCore) ? 1 : 0;
GELOGI("core_type: %ld", core_type);
if (!ge::AttrUtils::SetInt(&model, ATTR_MODEL_CORE_TYPE, core_type)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_MODEL_CORE_TYPE.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt of ATTR_CORE_TYPE failed.");
}
InitL1FusionOption();
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(&model, ATTR_NAME_SWITCH_FOR_L1_FUSION, is_l1_fusion_enable_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_NAME_SWITCH_FOR_L1_FUSION.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetBool of ATTR_NAME_SWITCH_FOR_L1_FUSION failed.");
return FAILED);
const DumpProperties &dump_properties = DumpManager::GetInstance().GetDumpProperties(session_id_);
bool is_op_debug = dump_properties.IsOpDebugOpen();
if (is_op_debug) {
if (!ge::AttrUtils::SetBool(&model, ATTR_OP_DEBUG_FLAG, is_op_debug)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_OP_DEBUG_FLAG.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetBool of ATTR_OP_DEBUG_FLAG failed.");
return FAILED;
}
uint32_t op_debug_mode = dump_properties.GetOpDebugMode();
GELOGI("Get op debug mode:%d", op_debug_mode);
if (!ge::AttrUtils::SetInt(&model, ATTR_OP_DEBUG_MODE, op_debug_mode)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s in model failed when %s",
ATTR_OP_DEBUG_MODE.c_str(), __FUNCTION__);
GELOGE(FAILED, "SetBool of ATTR_OP_DEBUG_MODE failed.");
return FAILED;
}
@@ -516,6 +554,8 @@ Status ModelBuilder::MergeWeights() {
// If MutableTensor failed, weight is nullptr.
(void)ge::AttrUtils::MutableTensor(op_desc, ATTR_NAME_WEIGHTS, weight);
if (weight == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get const weight in op:%s(%s) when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Can't get const op weight, name: %s", node->GetName().c_str());
return FAILED;
}
@@ -538,8 +578,15 @@ Status ModelBuilder::MergeWeights() {
continue;
}
if (weight_data.data() != nullptr) {
GE_IF_BOOL_EXEC(base_addr == nullptr, GELOGE(FAILED, "Base addr is nullptr."); return FAILED);
GE_IF_BOOL_EXEC(base_addr == nullptr,
REPORT_INNER_ERROR("E19999", "Check weight in op:%s(%s) is nullptr when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Base addr is nullptr.");
return FAILED);
if (weight_offset_ - offset < weight_data.size()) {
REPORT_INNER_ERROR("E19999", "left weight size not enough for op:%s(%s) left_size:%zu, weight_size:%zu when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
weight_offset_ - offset, weight_data.size(), __FUNCTION__);
GELOGE(FAILED, "left weight size not enough. left_size:%lu, weight_size:%lu",
weight_offset_ - offset, weight_data.size());
return FAILED;
@@ -551,6 +598,9 @@ Status ModelBuilder::MergeWeights() {
auto err = memcpy_s(reinterpret_cast<void *>(dst_ptr), SECUREC_MEM_MAX_LEN, reinterpret_cast<void *>(src_ptr),
SECUREC_MEM_MAX_LEN);
if (err != EOK) {
REPORT_CALL_ERROR("E19999", "mem copy failed. errret:%u, "
"dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu, when %s",
err, dst_ptr, SECUREC_MEM_MAX_LEN, src_ptr, SECUREC_MEM_MAX_LEN, __FUNCTION__);
GELOGE(FAILED, "mem copy failed. errret:%u, "
"dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu",
err, dst_ptr, SECUREC_MEM_MAX_LEN, src_ptr, SECUREC_MEM_MAX_LEN);
@@ -562,6 +612,9 @@ Status ModelBuilder::MergeWeights() {
}
auto err = memcpy_s(reinterpret_cast<void *>(dst_ptr), left_size, reinterpret_cast<void *>(src_ptr), left_size);
if (err != EOK) {
REPORT_CALL_ERROR("E19999", "mem copy failed. errret:%u, "
"dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu, when %s",
err, dst_ptr, SECUREC_MEM_MAX_LEN, src_ptr, SECUREC_MEM_MAX_LEN, __FUNCTION__);
GELOGE(FAILED, "mem copy failed. errret:%u, "
"dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu",
err, dst_ptr, SECUREC_MEM_MAX_LEN, src_ptr, SECUREC_MEM_MAX_LEN);
@@ -602,6 +655,8 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
}
GE_IF_BOOL_EXEC(tbe_kernel == nullptr, continue);
if (tbe_name_set.count(tbe_kernel->GetName()) > 0) {
REPORT_INNER_ERROR("E19999", "tbe_kernel name %s can't be the same, judge for op:%s(%s), when %s",
tbe_kernel->GetName().c_str(), n->GetName().c_str(), n->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "tbe_kernel name %s can't be the same", tbe_kernel->GetName().c_str());
return FAILED;
}
@@ -618,6 +673,8 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
node_op_desc->TryGetExtAttr(ge::OP_EXTATTR_CUSTAICPU_KERNEL, CustAICPUKernelPtr());
GE_IF_BOOL_EXEC(cust_aicpu_kernel == nullptr, continue);
if (aicpu_name_set.count(cust_aicpu_kernel->GetName()) > 0) {
REPORT_INNER_ERROR("E19999", "aicpu_kernel name %s can't be the same, judge for op:%s(%s), when %s",
cust_aicpu_kernel->GetName().c_str(), n->GetName().c_str(), n->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "aicpu_kernel name %s can't be the same", cust_aicpu_kernel->GetName().c_str());
return FAILED;
}
@@ -640,6 +697,7 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
// Add task
GeAttrValue::BYTES task_def_bytes;
if (!AttrUtils::GetZeroCopyBytes(model, MODEL_ATTR_TASKS, task_def_bytes)) {
REPORT_CALL_ERROR("E19999", "Get attr:%s in model fail when %s", MODEL_ATTR_TASKS.c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Get zero copy bytes fail.");
return INTERNAL_ERROR;
}
@@ -675,6 +733,7 @@ void ModelBuilder::SetModelVersion(ge::Model &model) {

Status ModelBuilder::PreBuildModel() {
if ((compute_graph_ == nullptr) || !(compute_graph_->IsValid())) {
REPORT_INNER_ERROR("E19999", "Check compute_graph no valid when %s", __FUNCTION__);
GELOGE(FAILED, "Graph_ is not valid.");
return FAILED;
}
@@ -754,6 +813,7 @@ Status ModelBuilder::CompileSingleOp() {
// Create ge instance
std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
if ((instance == nullptr) || !instance->InitFlag()) {
REPORT_INNER_ERROR("E19999", "Check GELib instance not init before when %s", __FUNCTION__);
GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "CompileSingleOp failed.");
return ge::GE_CLI_GE_NOT_INITIALIZED;
}
@@ -775,6 +835,8 @@ Status ModelBuilder::CompileSingleOp() {
(void)instance->DNNEngineManagerObj().GetDNNEngineName(node);
kernel_lib_name = op_desc->GetOpKernelLibName();
if (kernel_lib_name.empty()) {
REPORT_INNER_ERROR("E19999", "Check kernel lib name empty of op:%s(%s) when %s",
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(ge::INTERNAL_ERROR, "Get node:%s(%s) kernel lib failed.", node->GetName().c_str(),
node->GetType().c_str());
return ge::INTERNAL_ERROR;
@@ -785,6 +847,8 @@ Status ModelBuilder::CompileSingleOp() {
if (kernel_info != nullptr) {
node_vector_map[kernel_lib_name].emplace_back(node);
} else {
REPORT_INNER_ERROR("E19999", "Get ops kernel info store failed for op:%s(%s), op_kernel_name:%s, when %s",
node->GetName().c_str(), node->GetType().c_str(), kernel_lib_name.c_str(), __FUNCTION__);
GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", node->GetName().c_str());
return ge::GE_GRAPH_PARAM_NULLPTR;
}
@@ -800,6 +864,8 @@ Status ModelBuilder::CompileSingleOp() {
GELOGI("[GEPERFTRACE] The node size of compile op of %s is %zu", kernel_lib_name.c_str(), node_vector.size());
GE_TIMESTAMP_ADD(BatchCompileOp);
if (ret != ge::SUCCESS) {
REPORT_CALL_ERROR("E19999", "Batch compile op failed, kernel lib name, node size:%zu, when %s",
node_vector.size(), __FUNCTION__);
GELOGE(ret, "Compile op failed, kernel lib name is %s", kernel_lib_name.c_str());
return ret;
}
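For reference, the two memcpy_s hunks above belong to a chunked copy: memcpy_s rejects a single copy larger than SECUREC_MEM_MAX_LEN, so the weight data is copied chunk by chunk and the remainder with left_size. A minimal sketch under that assumption (function and variable names are illustrative, not from the patch):

#include <securec.h>
#include <cstddef>
#include <cstdint>

// Sketch only: copy total_size bytes in SECUREC_MEM_MAX_LEN chunks, then the tail.
static int CopyWeightChunked(uint8_t *dst, const uint8_t *src, size_t total_size) {
  size_t left_size = total_size;
  uintptr_t dst_ptr = reinterpret_cast<uintptr_t>(dst);
  uintptr_t src_ptr = reinterpret_cast<uintptr_t>(src);
  while (left_size > SECUREC_MEM_MAX_LEN) {
    if (memcpy_s(reinterpret_cast<void *>(dst_ptr), SECUREC_MEM_MAX_LEN,
                 reinterpret_cast<void *>(src_ptr), SECUREC_MEM_MAX_LEN) != EOK) {
      return -1;  // the patch reports this case via REPORT_CALL_ERROR + GELOGE
    }
    dst_ptr += SECUREC_MEM_MAX_LEN;
    src_ptr += SECUREC_MEM_MAX_LEN;
    left_size -= SECUREC_MEM_MAX_LEN;
  }
  if (left_size > 0 && memcpy_s(reinterpret_cast<void *>(dst_ptr), left_size,
                                reinterpret_cast<void *>(src_ptr), left_size) != EOK) {
    return -1;  // tail copy failure, logged with left_size as in the hunk above
  }
  return 0;
}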


+ 22
- 0
ge/graph/build/run_context.cc

@@ -27,15 +27,21 @@ Status RunContextUtil::InitMemInfo(uint8_t *data_mem_base, uint64_t data_mem_siz
std::map<int64_t, uint64_t> mem_type_to_data_mem_size, uint8_t *weight_mem_base,
uint64_t weight_mem_size) {
if ((data_mem_size > 0) && (data_mem_base == nullptr)) {
REPORT_INNER_ERROR("E19999", "InitMemInfo param data_mem_base is null but data_mem_size = %lu", data_mem_size);
GELOGE(PARAM_INVALID, "InitMemInfo param data_mem_base is null but data_mem_size = %lu.", data_mem_size);
return PARAM_INVALID;
}
if ((weight_mem_size > 0) && (weight_mem_base == nullptr)) {
REPORT_INNER_ERROR("E19999", "InitMemInfo param weight_mem_base is null but weight_mem_size = %lu",
weight_mem_size);
GELOGE(PARAM_INVALID, "InitMemInfo param weight_mem_base is null but weight_mem_size = %lu.", weight_mem_size);
return PARAM_INVALID;
}
if (mem_type_to_data_mem_base.empty() || mem_type_to_data_mem_size.empty() ||
mem_type_to_data_mem_base.size() != mem_type_to_data_mem_size.size()) {
REPORT_INNER_ERROR("E19999", "InitMemInfo param mem_type_to_data_mem_base size[%zu] "
"is not equal to the size of mem_type_to_data_mem_size[%zu].",
mem_type_to_data_mem_base.size(), mem_type_to_data_mem_size.size());
GELOGE(PARAM_INVALID,
"InitMemInfo param mem_type_to_data_mem_base size[%zu] is not equal to the size of "
"mem_type_to_data_mem_size[%zu].",
@@ -55,6 +61,7 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even
// Create rt model
rtError_t rt_ret = rtModelCreate(&rt_model_, 0);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtModelCreate fail, ret:%d, when %s", static_cast<int>(rt_ret), __FUNCTION__);
GELOGE(RT_FAILED, "rtModelCreate failed. rt_ret = %d", static_cast<int>(rt_ret));
return RT_FAILED;
}
@@ -64,6 +71,8 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even
rtStream_t stream = nullptr;
rt_ret = rtStreamCreate(&stream, 0);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtStreamCreate fail, ret:%d, index:%u, when %s",
static_cast<int>(rt_ret), i, __FUNCTION__);
GELOGE(RT_FAILED, "rtStreamCreate failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
return RT_FAILED;
}
@@ -71,6 +80,8 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even

rt_ret = rtModelBindStream(rt_model_, stream, 0);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtModelBindStream fail, ret:%d, index:%u, when %s",
static_cast<int>(rt_ret), i, __FUNCTION__);
GELOGE(RT_FAILED, "Bind stream and model failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
return RT_FAILED;
}
@@ -81,6 +92,8 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even
rtEvent_t event = nullptr;
rt_ret = rtEventCreate(&event);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtEventCreate fail, ret:%d, index:%u, when %s",
static_cast<int>(rt_ret), i, __FUNCTION__);
GELOGE(RT_FAILED, "rtEventCreate failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
return RT_FAILED;
}
@@ -92,6 +105,8 @@ Status RunContextUtil::CreateRtModelResources(uint32_t stream_num, uint32_t even
rtLabel_t label = nullptr;
rt_ret = rtLabelCreateV2(&label, rt_model_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtLabelCreateV2 fail, ret:%d, index:%u, when %s",
static_cast<int>(rt_ret), i, __FUNCTION__);
GELOGE(RT_FAILED, "rtLabelCreate failed. rt_ret = %d, index = %u", static_cast<int>(rt_ret), i);
return RT_FAILED;
}
@@ -143,12 +158,15 @@ Status RunContextUtil::CreateRunContext(Model &model, const ComputeGraphPtr &gra
GELOGD("Begin to Create RunContext, session_id = %lu", session_id);
// check params
if (graph == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param graph nullptr, session_id:%lu, when %s", session_id, __FUNCTION__);
GELOGE(PARAM_INVALID, "CreateRunContext param graph is null. session_id=%lu", session_id);
return PARAM_INVALID;
}

uint32_t stream_num = 0;
if (!AttrUtils::GetInt(&model, ATTR_MODEL_STREAM_NUM, stream_num)) {
REPORT_INNER_ERROR("Get Attr:%s fail for model, session_id:%lu, when %s",
ATTR_MODEL_STREAM_NUM.c_str(), session_id, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Get stream_num attr from model_def failed. session_id=%lu", session_id);
return INTERNAL_ERROR;
}
@@ -156,6 +174,8 @@ Status RunContextUtil::CreateRunContext(Model &model, const ComputeGraphPtr &gra

uint32_t event_num = 0;
if (!AttrUtils::GetInt(&model, ATTR_MODEL_EVENT_NUM, event_num)) {
REPORT_INNER_ERROR("Get Attr:%s fail for model, session_id:%lu, when %s",
ATTR_MODEL_EVENT_NUM.c_str(), session_id, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Get event_num attr from model failed. session_id=%lu", session_id);
return INTERNAL_ERROR;
}
@@ -163,6 +183,8 @@ Status RunContextUtil::CreateRunContext(Model &model, const ComputeGraphPtr &gra

uint32_t label_num = 0;
if (!AttrUtils::GetInt(&model, ATTR_MODEL_LABEL_NUM, label_num)) {
REPORT_INNER_ERROR("Get Attr:%s fail for model, session_id:%lu, when %s",
ATTR_MODEL_LABEL_NUM.c_str(), session_id, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Get label_num attr from model failed. session_id=%lu", session_id);
return INTERNAL_ERROR;
}
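The rt* checks added above all follow the same shape: report the runtime return code through the error manager, log it, then return RT_FAILED. A hypothetical consolidation of that shape, illustrative only (the macro is not part of this patch):

// Hypothetical helper: run an rt* call and, on failure, report + log + return RT_FAILED.
#define GE_REPORT_RT_CALL(call)                                                      \
  do {                                                                               \
    rtError_t rt_ret_ = (call);                                                      \
    if (rt_ret_ != RT_ERROR_NONE) {                                                  \
      REPORT_CALL_ERROR("E19999", "call %s fail, ret:%d, when %s", #call,            \
                        static_cast<int>(rt_ret_), __FUNCTION__);                    \
      GELOGE(RT_FAILED, "%s failed. rt_ret = %d", #call, static_cast<int>(rt_ret_)); \
      return RT_FAILED;                                                              \
    }                                                                                 \
  } while (false)

// Usage sketch: GE_REPORT_RT_CALL(rtStreamCreate(&stream, 0));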


+ 65
- 3
ge/graph/build/stream_allocator.cc

@@ -76,6 +76,7 @@ Status StreamAllocator::AssignLogicalStreams(const std::map<std::string, int> &m

auto gelib = GELib::GetInstance();
if (gelib == nullptr) {
REPORT_INNER_ERROR("E19999", "Check GELib instance nullptr when %s", __FUNCTION__);
GELOGE(FAILED, "Get GELib instance failed.");
return FAILED;
}
@@ -184,6 +185,8 @@ Status StreamAllocator::AssignSingleStream() {
}

if (stream_num_ > 1) {
REPORT_INNER_ERROR("E19999", "The number of ts streams is %ld, only one is supported when %s",
stream_num_, __FUNCTION__);
GELOGE(FAILED, "The number of ts streams is %ld, only one is supported.", stream_num_);
return FAILED;
}
@@ -257,6 +260,9 @@ Status StreamAllocator::SetActiveStreamsByLabel() {
}
}
GE_CHK_BOOL_EXEC(AttrUtils::SetListInt(node->GetOpDesc(), ATTR_NAME_ACTIVE_STREAM_LIST, activated_stream_list),
REPORT_INNER_ERROR("E19999", "Set Attr:%s for op:%s(%s) failed when %s",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetListInt failed.");
return FAILED);
}
@@ -307,6 +313,9 @@ Status StreamAllocator::SetActiveStreamsForSubgraphs() {
}

if (!AttrUtils::SetListInt(first_active_node->GetOpDesc(), ATTR_NAME_ACTIVE_STREAM_LIST, active_streams)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s for op:%s(%s) failed when %s",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
first_active_node->GetName().c_str(), first_active_node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Set active streams for node %s failed.", first_active_node->GetName().c_str());
return FAILED;
}
@@ -376,6 +385,8 @@ Status StreamAllocator::InsertOneEventInTwoNodes(const NodePtr &cur_node, const
}

if (next_stream_id == kInvalidStream) {
REPORT_INNER_ERROR("E19999", "Stream id of next_node %s(%s) should not be %ld when %s",
next_node->GetName().c_str(), next_node->GetType().c_str(), kInvalidStream, __FUNCTION__);
GELOGE(FAILED, "Stream id of next_node %s should not be %ld", next_node->GetName().c_str(), kInvalidStream);
return FAILED;
}
@@ -589,8 +600,14 @@ Status StreamAllocator::OptimizeByStreamActivate() {
// -> stream(streamSwitch) -> stream(streamActivate) -> stream(stream true or false)
// No need to insert an event between node in stream(normal) and node in stream(stream true or false)
bool StreamAllocator::IsRecvNodeActivatedBySendNode(const NodePtr &send_node_ptr, const NodePtr &recv_node_ptr) const {
GE_CHECK_NOTNULL_EXEC(send_node_ptr->GetOpDesc(), GELOGE(FAILED, "op desc is nullptr"); return false);
GE_CHECK_NOTNULL_EXEC(recv_node_ptr->GetOpDesc(), GELOGE(FAILED, "op desc is nullptr"); return false);
GE_CHECK_NOTNULL_EXEC(send_node_ptr->GetOpDesc(),
REPORT_INNER_ERROR("E19999", "Check param send_node_ptr nullptr when %s", __FUNCTION__);
GELOGE(FAILED, "op desc is nullptr");
return false);
GE_CHECK_NOTNULL_EXEC(recv_node_ptr->GetOpDesc(),
REPORT_INNER_ERROR("E19999", "Check param recv_node_ptr nullptr when %s", __FUNCTION__);
GELOGE(FAILED, "op desc is nullptr");
return false);
auto cur_stream_id = send_node_ptr->GetOpDesc()->GetStreamId();
if (AttrUtils::HasAttr(recv_node_ptr->GetOpDesc(), ATTR_NAME_STREAM_LABEL)) {
// find streamActivate node
@@ -714,6 +731,8 @@ Status StreamAllocator::SplitStreams(vector<set<int64_t>> &split_streams) {
continue;
}
if (stream_id > last_stream_id) {
REPORT_INNER_ERROR("E19999", "streamid(%ld) > last_stream_id(%ld), check invalid when %s",
stream_id, last_stream_id, __FUNCTION__);
GELOGE(FAILED, "SplitStreams:streamid(%ld) > last_stream_id(%ld)", stream_id, last_stream_id);
return FAILED;
}
@@ -727,6 +746,8 @@ Status StreamAllocator::SplitStreams(vector<set<int64_t>> &split_streams) {
stream_continuous_2_node_num_map[continuous_stream_label]++;
// return error
if (stream_continuous_2_node_num_map[continuous_stream_label] > max_node_num_one_stream) {
REPORT_INNER_ERROR("E19999", "Check node[%s] stream_id[%ld] continuous stream label[%s] unsatisfied when %s",
op_desc->GetName().c_str(), stream_id, continuous_stream_label.c_str(), __FUNCTION__);
GELOGE(FAILED, "SplitStreams:node[%s] stream_id[%ld] continuous stream label[%s] unsatisfied ",
op_desc->GetName().c_str(), stream_id, continuous_stream_label.c_str());
return FAILED;
@@ -881,6 +902,8 @@ Status StreamAllocator::UpdateActiveStreamsForSwitchNode(NodePtr &switch_node) {
GE_CHECK_NOTNULL(op_desc);

if (!AttrUtils::SetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, stream_ids)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetListInt failed.");
return FAILED;
}
@@ -895,6 +918,8 @@ Status StreamAllocator::InsertActiveNodesAfterSwitch(NodePtr &switch_node, vecto
vector<string> ori_active_label_list;
if (!AttrUtils::GetListStr(switch_desc, ATTR_NAME_ACTIVE_LABEL_LIST, ori_active_label_list) ||
ori_active_label_list.empty()) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(),
switch_node->GetName().c_str(), switch_node->GetType().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Get active label list of switch %s failed.", switch_node->GetName().c_str());
return INTERNAL_ERROR;
}
@@ -918,6 +943,8 @@ Status StreamAllocator::InsertActiveNodesAfterSwitch(NodePtr &switch_node, vecto
for (auto &active_node : added_active_nodes) {
GE_CHECK_NOTNULL(switch_node->GetOutControlAnchor());
if (switch_node->GetOutControlAnchor()->LinkTo(active_node->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Link from %s to %s failed when %s",
switch_node->GetName().c_str(), active_node->GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "Link %s to %s failed.", switch_node->GetName().c_str(), active_node->GetName().c_str());
return FAILED;
}
@@ -933,6 +960,8 @@ Status StreamAllocator::UpdateActiveStreamsForActiveNode(const vector<set<int64_
vector<uint32_t> new_active_streams = active_streams;
for (uint32_t logical_stream : active_streams) {
if (static_cast<size_t>(logical_stream) >= split_streams.size()) {
REPORT_INNER_ERROR("E19999", "Check logical stream:%u is out of range:%zu when %s",
logical_stream, split_streams.size(), __FUNCTION__);
GELOGE(FAILED, "logical stream is out of range.");
return FAILED;
}
@@ -951,6 +980,8 @@ Status StreamAllocator::UpdateActiveStreamsForActiveNode(const vector<set<int64_
}
}
if (!AttrUtils::SetListInt(node->GetOpDesc(), ATTR_NAME_ACTIVE_STREAM_LIST, new_active_streams)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Set active streams for node %s failed.", node->GetName().c_str());
return FAILED;
}
@@ -991,6 +1022,8 @@ Status StreamAllocator::UpdateActiveStreamsForSubgraphs() const {
new_active_streams.emplace(static_cast<uint32_t>(new_split_stream));
active_streams.assign(new_active_streams.begin(), new_active_streams.end());
if (!AttrUtils::SetListInt(active_op, ATTR_NAME_ACTIVE_STREAM_LIST, active_streams)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
active_op->GetName().c_str(), active_op->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Set active streams for node %s failed.", active_node->GetName().c_str());
return FAILED;
}
@@ -1059,6 +1092,8 @@ Status StreamAllocator::SetActiveStreamsForLoop() {

NodePtr pre_switch_node = FindSwitchNodeBeforeLoopActiveNode(node);
if (pre_switch_node == nullptr) {
REPORT_INNER_ERROR("E19999", "Find switch node before loop active node %s fail when %s",
node->GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "find switch node before loop active node %s failed", node->GetName().c_str());
return FAILED;
}
@@ -1066,6 +1101,9 @@ Status StreamAllocator::SetActiveStreamsForLoop() {
if (!AttrUtils::GetListStr(node->GetOpDesc(), ATTR_NAME_ACTIVE_LABEL_LIST, activated_label_list) ||
activated_label_list.empty()) {
GE_CHK_BOOL_EXEC(AttrUtils::SetListInt(node->GetOpDesc(), ATTR_NAME_ACTIVE_STREAM_LIST, loop_active_streams),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s",
ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetListInt failed.");
return FAILED);
for (const auto &stream_id : loop_active_streams) {
@@ -1112,6 +1150,8 @@ Status StreamAllocator::CheckStreamActived() const {
uint32_t stream_id = static_cast<uint32_t>(node->GetOpDesc()->GetStreamId());
auto iter = find(active_streams.begin(), active_streams.end(), stream_id);
if (iter != active_streams.end()) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) cannot active its own stream %u, check invalid when %s",
node->GetName().c_str(), node->GetType().c_str(), stream_id, __FUNCTION__);
GELOGE(FAILED, "Node %s cannot active its own stream %u.", node->GetName().c_str(), stream_id);
return FAILED;
}
@@ -1139,6 +1179,7 @@ Status StreamAllocator::RefreshContinuousEvents() {
for (size_t i = 0; i < send_events.size(); i++) {
auto find_it = old_to_new_events.find(send_events[i]);
if (find_it == old_to_new_events.end()) {
REPORT_INNER_ERROR("E19999", "Check invalid send event %u when %s", send_events[i], __FUNCTION__);
GELOGE(FAILED, "RefreshContinuousEvents: invalid send event %u", send_events[i]);
return FAILED;
}
@@ -1152,6 +1193,7 @@ Status StreamAllocator::RefreshContinuousEvents() {
for (size_t i = 0; i < recv_events.size(); i++) {
auto find_it = old_to_new_events.find(recv_events[i]);
if (find_it == old_to_new_events.end()) {
REPORT_INNER_ERROR("E19999", "Check invalid recv event %u when %s", recv_events[i], __FUNCTION__);
GELOGE(FAILED, "RefreshContinuousEvents: invalid recv event %u", recv_events[i]);
return FAILED;
}
@@ -1180,7 +1222,11 @@ Status StreamAllocator::InsertSyncEventNodes() {

int64_t temp_stream_id = node->GetOpDesc()->GetStreamId();
op_desc_ptr->SetStreamId(temp_stream_id);
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(op_desc_ptr, RECV_ATTR_EVENT_ID, event_id), GELOGE(FAILED, "SetInt failed.");
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(op_desc_ptr, RECV_ATTR_EVENT_ID, event_id),
REPORT_INNER_ERROR("E19999", "Set Attr:%s for op:%s(%s) failed, event_id:%u, when %s",
RECV_ATTR_EVENT_ID.c_str(),
node->GetName().c_str(), node->GetType().c_str(), event_id, __FUNCTION__);
GELOGE(FAILED, "SetInt failed.");
return FAILED);
(void)AttrUtils::SetListStr(op_desc_ptr, ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES,
std::move(std::vector<std::string>()));
@@ -1189,6 +1235,8 @@ Status StreamAllocator::InsertSyncEventNodes() {
GE_CHECK_NOTNULL(recv_node->GetOutControlAnchor());
Status status = GraphUtils::AddEdge(recv_node->GetOutControlAnchor(), node->GetInControlAnchor());
if (status != SUCCESS) {
REPORT_INNER_ERROR("E19999", "Add edge from node %s to node %s failed when %s",
recv_node->GetName().c_str(), node->GetName().c_str(), __FUNCTION__);
GELOGE(status, "Add edge for node %s and node %s failed.", recv_node->GetName().c_str(),
node->GetName().c_str());
return status;
@@ -1217,6 +1265,8 @@ Status StreamAllocator::InsertSyncEventNodes() {
GE_CHECK_NOTNULL(send_node->GetInControlAnchor());
Status status = GraphUtils::AddEdge(node->GetOutControlAnchor(), send_node->GetInControlAnchor());
if (status != SUCCESS) {
REPORT_INNER_ERROR("E19999", "Add edge from node %s to node %s failed when %s",
node->GetName().c_str(), send_node->GetName().c_str(), __FUNCTION__);
GELOGE(status, "Add edge for node %s and node %s failed.", node->GetName().c_str(),
send_node->GetName().c_str());
return status;
@@ -1228,6 +1278,8 @@ Status StreamAllocator::InsertSyncEventNodes() {

Status status = whole_graph_->InsertGraphEvents();
if (status != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Insert Graph Events fail, graph:%s, when %s",
whole_graph_->GetName().c_str(), __FUNCTION__);
GELOGE(status, "Graph ReorderEventNodes failed");
return status;
}
@@ -1274,6 +1326,8 @@ Status StreamAllocator::GetMaxStreamAndTask(bool huge_stream, uint32_t &max_stre
}
rtError_t ret = rtGetMaxStreamAndTask(stream_type, &max_stream_count, &max_task_count);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "call rtGetMaxStreamAndTask fail, ret:%d, stream_type:%u, when %s",
static_cast<int>(ret), stream_type, __FUNCTION__);
GELOGE(FAILED, "Get max stream and task count by rts failed.");
return FAILED;
}
@@ -1416,6 +1470,7 @@ Status StreamAllocator::AddActiveNodes(NodePtr &switch_node, const vector<string
for (size_t i = 0; i < label_num; i++) {
const string &active_label = ori_active_label_list[i];
if (labeled_streams_.find(active_label) == labeled_streams_.end()) {
REPORT_INNER_ERROR("E19999", "can not find stream label:%s when %s", active_label.c_str(), __FUNCTION__);
GELOGE(FAILED, "can not find stream label %s", active_label.c_str());
return FAILED;
}
@@ -1442,11 +1497,15 @@ Status StreamAllocator::AddActiveNodes(NodePtr &switch_node, const vector<string
}
GE_CHECK_NOTNULL(switch_node->GetOutControlAnchor());
if (switch_node->GetOutControlAnchor()->Unlink(node->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("Unlink %s to %s failed when %s",
switch_node->GetName().c_str(), node->GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "Unlink %s to %s failed.", switch_node->GetName().c_str(), node->GetName().c_str());
return FAILED;
}
GE_CHECK_NOTNULL(active_node->GetOutControlAnchor());
if (active_node->GetOutControlAnchor()->LinkTo(node->GetInControlAnchor()) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("Link %s to %s failed when %s",
active_node->GetName().c_str(), node->GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "Link %s to %s failed.", active_node->GetName().c_str(), node->GetName().c_str());
return FAILED;
}
@@ -1477,12 +1536,15 @@ Status StreamAllocator::AddActiveNodes(NodePtr &switch_node, const vector<string

Status StreamAllocator::SetActiveStreamList(NodePtr &active_node, const string &active_label) {
if (labeled_streams_.find(active_label) == labeled_streams_.end()) {
REPORT_INNER_ERROR("E19999", "Can not find stream label:%s when %s", active_label.c_str(), __FUNCTION__);
GELOGE(FAILED, "Can not find stream label %s.", active_label.c_str());
return FAILED;
}
set<int64_t> &streams = labeled_streams_[active_label];
vector<int64_t> active_streams(streams.begin(), streams.end());
if (!AttrUtils::SetListInt(active_node->GetOpDesc(), ATTR_NAME_ACTIVE_STREAM_LIST, active_streams)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
active_node->GetName().c_str(), active_node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetListInt of %s failed.", ATTR_NAME_ACTIVE_STREAM_LIST.c_str());
return FAILED;
}
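Most changes in this file extend GE_CHK_BOOL_EXEC call sites so the failure branch reports before logging. The macro definition is not shown in this change; a simplified stand-in matching how it is used above (assumed expansion, for illustration only; op_desc and streams are assumed in scope in the usage sketch):

// Assumed, simplified expansion: run the trailing statements when expr is false.
#define GE_CHK_BOOL_EXEC(expr, ...) \
  do {                              \
    if (!(expr)) {                  \
      __VA_ARGS__;                  \
    }                               \
  } while (false)

// Call-site shape used throughout the hunks above: report, log, then return.
GE_CHK_BOOL_EXEC(AttrUtils::SetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, streams),
                 REPORT_INNER_ERROR("E19999", "Set Attr:%s for op:%s(%s) failed when %s",
                                    ATTR_NAME_ACTIVE_STREAM_LIST.c_str(),
                                    op_desc->GetName().c_str(), op_desc->GetType().c_str(),
                                    __FUNCTION__);
                 GELOGE(FAILED, "SetListInt failed.");
                 return FAILED);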


+ 9
- 0
ge/graph/build/stream_graph_optimizer.cc

@@ -14,6 +14,9 @@
* limitations under the License.
*/
#include "stream_graph_optimizer.h"

#include <securec.h>

#include "common/util.h"
#include "framework/common/debug/ge_log.h"
#include "graph/utils/node_utils.h"
@@ -122,6 +125,9 @@ Status StreamGraphOptimizer::OptimizeStreamedSubGraph(const ComputeGraphPtr &com
GE_CHECK_NOTNULL(op_desc);
int64_t stream_id = op_desc->GetStreamId();
if (static_cast<size_t>(stream_id) >= run_context.graphStreamList.size()) {
REPORT_INNER_ERROR("E19999", "Check stream_id:%ld in op:%s(%s) is bigger than run_context.graphStreamList.size():%zu "
"when %s", stream_id, op_desc->GetName().c_str(),
op_desc->GetType().c_str(), run_context.graphStreamList.size(), __FUNCTION__);
GELOGE(FAILED, "stream_id %ld is bigger than run_context.graphStreamList.size() %zu", stream_id,
run_context.graphStreamList.size());
return FAILED;
@@ -135,6 +141,9 @@ Status StreamGraphOptimizer::OptimizeStreamedSubGraph(const ComputeGraphPtr &com
for (auto iter = graph_optimizers.begin(); iter != graph_optimizers.end(); ++iter) {
GE_CHECK_NOTNULL(*iter);
Status ret = (*iter)->OptimizeStreamGraph(*subgraph, run_context);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call optimize streamed subgraph failed, subgraph: %s, engine_name: %s, graph "
"Optimizer num: %zu, ret: %u", subgraph->GetName().c_str(), engine_name.c_str(),
graph_optimizers.size(), ret);
GELOGE(
ret,


+ 83
- 6
ge/graph/build/task_generator.cc

@@ -49,6 +49,7 @@ const char *const kIsLastNode = "is_last_node";
const char *const kIsInputVar = "INPUT_IS_VAR";
const char *const kIsOutputVar = "OUTPUT_IS_VAR";
const char *const kProfilingMode = "PROFILING_MODE";
const char *const kIteratorV2 = "IteratorV2";
const uint32_t kProfilingArStep = 2;
const uint64_t kProfilingFpStartLogid = 1;
const uint64_t kProfilingBpEndLogid = 2;
@@ -57,6 +58,7 @@ const uint64_t kProfilingArEndLogid = 4;
const uint64_t kProfilingIterEndLogid = 65535;
const int64_t kHashFactor = 100000;
const int64_t kInvalidGroupId = -1;
const std::set<std::string> kFpNodeTypes = {ge::DATA, ge::GETNEXT, kIteratorV2};
} // namespace
namespace ge {
TaskGenerator::TaskGenerator(uint8_t *var_mem_base, uint64_t var_mem_size) {
@@ -69,6 +71,7 @@ Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t
GELOGD("Begin to Get TaskInfo. session_id=%lu", session_id);
// Check params
if (graph == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param graph is null, session_id:%lu, when %s", session_id, __FUNCTION__);
GELOGE(PARAM_INVALID, "GetTaskInfo param graph is null. session_id=%lu", session_id);
return PARAM_INVALID;
}
@@ -93,6 +96,8 @@ Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t
op_name.push_back(iter.second);
}
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(model, ATTR_MODEL_TASK_INDEX_OP_NAME, op_name),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s when %s",
ATTR_MODEL_TASK_INDEX_OP_NAME.c_str(), model.GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetListStr failed.");
return FAILED);

@@ -106,6 +111,8 @@ Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t
for (const TaskDef &task_def_temp : task_def_list) {
TaskDef *task_def = model_task_def.add_task();
if (task_def == nullptr) {
REPORT_INNER_ERROR("E19999", "Add task_def in ModelTaskDef fail, session_id:%lu, graph:%s, model:%s, when %s",
session_id, graph->GetName().c_str(), model.GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "task_def is nullptr.");
return FAILED;
}
@@ -126,30 +133,44 @@ Status TaskGenerator::AddModelTaskToModel(const ModelTaskDef &model_task_def, ui
RunContext &run_context) {
GE_CHK_BOOL_EXEC(
AttrUtils::SetInt(model, MODEL_ATTR_TASK_GEN_BASE_ADDR, reinterpret_cast<uintptr_t>(run_context.dataMemBase)),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s when %s",
MODEL_ATTR_TASK_GEN_BASE_ADDR.c_str(), model.GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt MODEL_ATTR_TASK_GEN_BASE_ADDR failed.");
return FAILED);
GE_CHK_BOOL_EXEC(
AttrUtils::SetInt(model, MODEL_ATTR_TASK_GEN_WEIGHT_ADDR, reinterpret_cast<uintptr_t>(run_context.weightMemBase)),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s when %s",
MODEL_ATTR_TASK_GEN_WEIGHT_ADDR.c_str(), model.GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt MODEL_ATTR_TASK_GEN_WEIGHT_ADDR failed.");
return FAILED);
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, ATTR_MODEL_TASK_GEN_VAR_ADDR, reinterpret_cast<uintptr_t>(var_mem_base_)),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s when %s",
ATTR_MODEL_TASK_GEN_VAR_ADDR.c_str(), model.GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt ATTR_MODEL_TASK_GEN_VAR_ADDR failed.");
return FAILED);
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, ATTR_MODEL_VAR_SIZE, var_mem_size_),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for model:%s when %s",
ATTR_MODEL_VAR_SIZE.c_str(), model.GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt ATTR_MODEL_VAR_SIZE failed.");
return FAILED);
GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, MODEL_ATTR_SESSION_ID, session_id),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for mode:%s when %s",
MODEL_ATTR_SESSION_ID.c_str(), model.GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetInt MODEL_ATTR_SESSION_ID failed.");
return FAILED);

size_t task_size = model_task_def.ByteSizeLong();
ge::Buffer serial_buff(task_size);
if (!model_task_def.SerializePartialToArray(serial_buff.GetData(), static_cast<int>(task_size))) {
REPORT_INNER_ERROR("E19999", "model_task_def's serialize failed, model name = %s, task_size=%zu when %s",
model.GetName().c_str(), task_size, __FUNCTION__);
GELOGE(FAILED, "model_task_def's serialize failed, model name = %s, task_size=%zu.", model.GetName().c_str(),
task_size);
return FAILED;
}
if (!AttrUtils::SetZeroCopyBytes(model, MODEL_ATTR_TASKS, std::move(serial_buff))) {
REPORT_INNER_ERROR("E19999", "Set model task to model failed, model name = %s, task_size=%zu when %s",
model.GetName().c_str(), task_size, __FUNCTION__);
GELOGE(FAILED, "Set model task to model failed, model name = %s, task_size=%zu.", model.GetName().c_str(),
task_size);
return FAILED;
@@ -167,7 +188,10 @@ Status TaskGenerator::UpdateOpIsVarAttr(const OpDescPtr &op_desc, uint64_t sessi
for (int64_t input : input_offsets) {
input_var.push_back(VarManager::Instance(session_id)->IsVarAddr(input));
}
GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsInputVar, input_var), GELOGE(FAILED, "SetListBool failed.");
GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsInputVar, input_var),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", kIsInputVar,
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetListBool failed.");
return FAILED);
}

@@ -177,7 +201,10 @@ Status TaskGenerator::UpdateOpIsVarAttr(const OpDescPtr &op_desc, uint64_t sessi
for (int64_t output : output_offsets) {
output_var.push_back(VarManager::Instance(session_id)->IsVarAddr(output));
}
GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsOutputVar, output_var), GELOGE(FAILED, "SetListBool failed.");
GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsOutputVar, output_var),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", kIsOutputVar,
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "SetListBool failed.");
return FAILED);
}
return SUCCESS;
@@ -252,6 +279,7 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
GELOGD("Beign to generate task, graph name is %s.", graph->GetName().c_str());
std::shared_ptr<GELib> ge_lib = GELib::GetInstance();
if ((ge_lib == nullptr) || !ge_lib->InitFlag()) {
REPORT_INNER_ERROR("E19999", "Check GELib instance not init before when %s", __FUNCTION__);
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GenerateTask failed.");
return GE_CLI_GE_NOT_INITIALIZED;
}
@@ -319,6 +347,8 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
}
auto kernel_info_store = ops_kernel_manager.GetOpsKernelInfoStore(op_kernel_lib_name);
if (kernel_info_store == nullptr) {
REPORT_INNER_ERROR("E19999", "Get ops kernel info store failed for op:%s(%s), op_kernel_name:%s when %s",
node->GetName().c_str(), node->GetType().c_str(), op_kernel_lib_name.c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR,
"No ops kernel store or ops kernel builder found. node:%s(%s), op_kernel_lib_name=%s.",
name.c_str(),
@@ -344,6 +374,8 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
auto ret = OpsKernelBuilderManager::Instance().GenerateTask(*node, run_context, task_def_list);
GE_TIMESTAMP_ADD(GenerateTask);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call OpsKernelBuilderManager GenerateTask fail for op:%s(%s) when %s",
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(ret, "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task failed.",
op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id);
return ret;
@@ -353,6 +385,9 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra
size_t task_list_size_after = task_def_list.size();
// If tasks is reduced
if (task_list_size_after < task_list_size_before) {
REPORT_INNER_ERROR("E19999", "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task "
"but task num from %zu to %zu, check invalid", op_kernel_lib_name.c_str(), name.c_str(),
type.c_str(), op_id, stream_id, task_list_size_before, task_list_size_after);
GELOGE(FAILED, "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task. but task num from %zu to %zu.",
op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id, task_list_size_before,
task_list_size_after);
@@ -417,6 +452,9 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info
size_t task_list_size_before = task_def_list.size();
OpsKernelInfoStorePtr kernel_info_store = ops_kernel_manager.GetOpsKernelInfoStore(op_kernel_lib_name);
if (kernel_info_store == nullptr) {
REPORT_INNER_ERROR("E19999", "Get ops kernel info store failed for op:%s(%s), op_kernel_name:%s when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
op_kernel_lib_name.c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR,
"Fusion: No ops kernel store or ops kernel builder found. fusion_node:%s(%s), op_kernel_lib_name=%s.",
fusion_node_name.c_str(), fusion_node_type.c_str(), op_kernel_lib_name.c_str());
@@ -433,6 +471,9 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info
int64_t op_id = op_desc->GetId();
int64_t stream_id = op_desc->GetStreamId();
if (stream_id < 0 || stream_id >= (int64_t)run_context.graphStreamList.size()) {
REPORT_INNER_ERROR("E19999", "Fusion: fusion_node[name:%s(%s), id:%ld] stream id is invalid, "
"stream list size=%zu, when %s", fusion_node_name.c_str(), fusion_node_type.c_str(),
op_id, run_context.graphStreamList.size(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Fusion: fusion_node[name:%s(%s), id:%ld] stream id is invalid, stream list size=%zu",
fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, run_context.graphStreamList.size());
return INTERNAL_ERROR;
@@ -444,6 +485,9 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info
op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id);
ret = OpsKernelBuilderManager::Instance().GenerateTask(*fusion_node, run_context, task_def_list);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", " Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
"id:%ld, stream_id:%ld] task failed when %s", op_kernel_lib_name.c_str(),
fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id, __FUNCTION__);
GELOGE(ret,
"Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
"id:%ld, stream_id:%ld] task failed.",
@@ -455,6 +499,10 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info
size_t task_list_size_after = task_def_list.size();
// if tasks is reduced
if (task_list_size_after < task_list_size_before) {
REPORT_INNER_ERROR("E19999", "InsertProfilingTask for fusion_node:[fusion_node_name:%s(%s), kernel_name:%s"
"id:%ld, stream_id:%ld] task, but task num from %zu to %zu, check invalid when %s",
fusion_node_name.c_str(), fusion_node_type.c_str(), op_kernel_lib_name.c_str(),
op_id, stream_id, task_list_size_before, task_list_size_after, __FUNCTION__);
GELOGE(FAILED,
"Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
"id:%ld, stream_id:%ld] task. but task num from %zu to %zu.",
@@ -489,6 +537,8 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info

Status TaskGenerator::UpdateAnchorStatus(const NodePtr &node) {
if (NodeUtils::SetAllAnchorStatus(node) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "SetAllAnchorStatus fail for op:%s(%s) when %s",
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "NodeUtils::SetAllAnchorStatus failed.");
return INTERNAL_ERROR;
}
@@ -496,6 +546,8 @@ Status TaskGenerator::UpdateAnchorStatus(const NodePtr &node) {
auto peer_anchor = anchor->GetPeerOutAnchor();
if (peer_anchor == nullptr) {
if (AnchorUtils::SetStatus(anchor, ANCHOR_SUSPEND) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set in peer anchor status fail for op:%s(%s), anchor_index:%d, when %s",
node->GetName().c_str(), node->GetType().c_str(), anchor->GetIdx(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
return INTERNAL_ERROR;
}
@@ -506,11 +558,15 @@ Status TaskGenerator::UpdateAnchorStatus(const NodePtr &node) {
bool is_const = NodeUtils::GetConstOpType(peer_anchor->GetOwnerNode(), const_type);
if (is_const && (const_type == CONSTANT)) {
if (AnchorUtils::SetStatus(anchor, ANCHOR_CONST) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set in anchor CONST status fail for op:%s(%s), anchor_index:%d, when %s",
node->GetName().c_str(), node->GetType().c_str(), anchor->GetIdx(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
return INTERNAL_ERROR;
}
} else {
if (AnchorUtils::SetStatus(anchor, ANCHOR_DATA) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Set in anchor DATA status fail for op:%s(%s), anchor_index:%d, when %s",
node->GetName().c_str(), node->GetType().c_str(), anchor->GetIdx(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
return INTERNAL_ERROR;
}
@@ -523,12 +579,15 @@ Status TaskGenerator::UpdateAnchorStatus(const NodePtr &node) {
Status TaskGenerator::MarkNodeAndSetIndex(ComputeGraphPtr &graph) {
auto ge_lib = GELib::GetInstance();
if ((ge_lib == nullptr) || !ge_lib->InitFlag()) {
REPORT_INNER_ERROR("E19999", "Check GELib instance not init before when %s", __FUNCTION__);
GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GE is not initialized or is finalized.");
return GE_CLI_GE_NOT_INITIALIZED;
}

const auto all_nodes = graph->GetNodes(graph->GetGraphUnknownFlag());
if (all_nodes.empty()) {
REPORT_INNER_ERROR("E19999", "Check param all_nodes empty in graph:%s when %s",
graph->GetName().c_str(), __FUNCTION__);
GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "Graph's node is empty");
return GE_GRAPH_GRAPH_NODE_NULL;
}
@@ -584,6 +643,9 @@ Status TaskGenerator::MarkFirstAndLastOps(const vector<OpDescPtr> &ops, bool is_
for (auto &op_desc : continuous_ops) {
string op_kernel_lib_name = op_desc->GetOpKernelLibName();
if (op_kernel_lib_name.empty()) {
REPORT_INNER_ERROR("E19999", "Get ops kernel info store failed for op:%s(%s), op_kernel_name:%s when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
op_kernel_lib_name.c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "node:%s(%s) get op kernel lib failed.", op_desc->GetName().c_str(),
op_desc->GetType().c_str());
return INTERNAL_ERROR;
@@ -599,9 +661,17 @@ Status TaskGenerator::MarkFirstAndLastOps(const vector<OpDescPtr> &ops, bool is_

for (auto &it : first_and_last_ops) {
auto &op_pair = it.second;
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.first, kIsFirstNode, true), GELOGE(FAILED, "SetBool failed.");
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.first, kIsFirstNode, true),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", kIsFirstNode,
op_pair.first->GetName().c_str(), op_pair.first->GetType().c_str(),
__FUNCTION__);
GELOGE(FAILED, "SetBool failed.");
return FAILED);
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.second, kIsLastNode, true), GELOGE(FAILED, "SetBool failed.");
GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.second, kIsLastNode, true),
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", kIsLastNode,
op_pair.second->GetName().c_str(), op_pair.second->GetType().c_str(),
__FUNCTION__);
GELOGE(FAILED, "SetBool failed.");
return FAILED);
}
}
@@ -621,8 +691,10 @@ Status TaskGenerator::AutoFindFpOpIndex(const ComputeGraphPtr &graph, ProfilingP
if (op_kernel_lib_name.empty()) {
continue;
}

if (op_desc->GetType() == GETNEXT || op_desc->GetType() == DATA) {
auto type = op_desc->GetType();
std::string original_type;
(void)AttrUtils::GetStr(op_desc, ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, original_type);
if (kFpNodeTypes.find(type) != kFpNodeTypes.end() || kFpNodeTypes.find(original_type) != kFpNodeTypes.end()) {
auto out_anchor = node->GetOutDataAnchor(0);
for (auto &peer_in_anchor : out_anchor->GetPeerInDataAnchors()) {
GE_CHECK_NOTNULL(peer_in_anchor);
@@ -906,6 +978,8 @@ Status TaskGenerator::InsertProfilingArTaskBefore(const OpDescPtr &op_desc, std:
for (size_t i = 0; i < all_reduce_nodes.size(); i++) {
if (all_reduce_nodes[i] == node_index) {
GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(i, kProfilingArStep),
REPORT_INNER_ERROR("E19999", "Multiply result is out of range when calc profiling ar log id "
"for node:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Multiply result is out of range.");
return FAILED);
ar_log_id = i * kProfilingArStep + kProfilingArStartLogid;
@@ -998,6 +1072,8 @@ Status TaskGenerator::InsertProfilingArTaskAfter(const OpDescPtr &op_desc, std::
for (size_t i = 0; i < all_reduce_nodes.size(); i++) {
if (all_reduce_nodes[i] == node_index) {
GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(i, kProfilingArStep),
REPORT_INNER_ERROR("E19999", "Multiply result is out of range when calc profiling ar log id "
"for node:%s(%s)", op_desc->GetName().c_str(), op_desc->GetType().c_str());
GELOGE(FAILED, "Multiply result is out of range.");
return FAILED);
ar_log_id = i * kProfilingArStep + kProfilingArEndLogid;
@@ -1107,6 +1183,7 @@ Status TaskGenerator::SetUnknownShapeStream(RunContext &run_context, rtStream_t
run_context.stream = stream;
rtError_t rt_ret = rtModelBindStream(run_context.model, stream, 0);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelBindStream fail, ret:0x%X when %s", rt_ret, __FUNCTION__);
GELOGE(FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
GE_CHK_RT_RET(rtStreamDestroy(stream));
return FAILED;
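The AutoFindFpOpIndex hunk above widens the forward-pass entry check from a hard-coded DATA/GETNEXT comparison to a set lookup that also consults the node's original framework type, so TensorFlow IteratorV2 nodes are matched as well. The essence of that check as a sketch (op_desc is assumed in scope; "IteratorV2" is the value of kIteratorV2 from the hunk):

// Sketch of the widened FP-entry check from the hunk above.
const std::set<std::string> kFpNodeTypes = {ge::DATA, ge::GETNEXT, "IteratorV2"};
const std::string type = op_desc->GetType();
std::string original_type;
(void)AttrUtils::GetStr(op_desc, ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, original_type);
const bool is_fp_entry =
    (kFpNodeTypes.count(type) > 0) || (kFpNodeTypes.count(original_type) > 0);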


+ 2
- 0
ge/graph/common/bcast.cc

@@ -73,6 +73,8 @@ Status BCast::SetShapeDifferentInfo(const kVecInt &x, const kVecInt &y) {
y_bcast_i = x_i;
grad_y_reduce_idx_.push_back(n - 1 - i);
} else {
REPORT_INNER_ERROR("E19999", "SetShapeDifferentInfo failed. Two tensor shapes are not compatible "
"according to the broadcasting rule.");
GELOGE(domi::PARAM_INVALID,
"SetShapeDifferentInfo failed. Two tensor shapes are not compatible "
"according to the broadcasting rule.");


+ 7
- 0
ge/graph/common/bcast.h

@@ -111,11 +111,14 @@ class BCast {
const std::function<OutT(InT const &, InT const &)> &func) {
Status ret;
if (func == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param func nullptr when %s", __FUNCTION__);
GELOGE(domi::PARAM_INVALID, "Param func is null");
return domi::PARAM_INVALID;
}
// Min input num is 2
if (input.size() < kMinDimNum) {
REPORT_INNER_ERROR("E19999", "Param input.size():%zu < %zu, check invalid when %s",
input.size(), kMinDimNum, __FUNCTION__);
GELOGE(domi::PARAM_INVALID, "Input size is smaller than two.");
return domi::PARAM_INVALID;
}
@@ -149,11 +152,14 @@ class BCast {
Status BCastComputeCheck(const std::vector<ConstGeTensorPtr> &input, std::vector<OutT> &v_output,
const std::function<OutT(InT const &, InT const &, DataType &type, Status &)> &func) {
if (func == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param func nullptr when %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "Param func is null");
return PARAM_INVALID;
}
// Min input num is 2
if (input.size() < kMinDimNum) {
REPORT_INNER_ERROR("E19999", "Param input.size():%zu < %zu, check invalid when %s",
input.size(), kMinDimNum, __FUNCTION__);
GELOGE(PARAM_INVALID, "Input size is smaller than two.");
return PARAM_INVALID;
}
@@ -179,6 +185,7 @@ class BCast {
auto value = func((*(reinterpret_cast<const InT *>(x1_data) + x_index)),
(*(reinterpret_cast<const InT *>(x2_data) + y_index)), data_type, ret);
if (ret != SUCCESS) {
REPORT_INNER_ERROR("E19999", "BCastComputeCheck func execute failed, datatype is %d.", data_type);
GELOGE(ret, "BCastComputeCheck func execute failed, datatype is %d.", data_type);
return ret;
}
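A hedged usage sketch for the templates above, assuming the first hunk belongs to a member Status BCast::BCastCompute(input, v_output, func) with the parameter order visible in the signature and InT/OutT as its template parameters; the functor combines one element from each broadcast input. x_tensor and y_tensor are hypothetical inputs.

// Assumption-labelled sketch: element-wise add over two broadcastable int32 tensors.
std::vector<ConstGeTensorPtr> inputs = {x_tensor, y_tensor};
std::vector<int32_t> outputs;
BCast bcast;
Status ret = bcast.BCastCompute<int32_t, int32_t>(
    inputs, outputs, [](const int32_t &a, const int32_t &b) { return a + b; });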


+ 18
- 0
ge/graph/common/omg_util.cc

@@ -36,6 +36,8 @@ Status GetOriginalType(const ge::NodePtr &node, string &type) {
GE_CHECK_NOTNULL(node->GetOpDesc());
bool ret = ge::AttrUtils::GetStr(node->GetOpDesc(), ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, type);
if (!ret) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Get FrameWorkOp original type [%s]", type.c_str());
return INTERNAL_ERROR;
}
@@ -55,6 +57,8 @@ Status SetStreamLabel(const ge::NodePtr &node, const std::string &label) {
GE_CHECK_NOTNULL(tmp_desc);

if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_STREAM_LABEL, label)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_STREAM_LABEL.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Op: %s set ATTR_NAME_STREAM_LABEL failed", node->GetName().c_str());
return FAILED;
}
@@ -72,6 +76,8 @@ Status SetCycleEvent(const ge::NodePtr &node) {
OpDescPtr tmp_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(tmp_desc);
if (!AttrUtils::SetBool(tmp_desc, ge::ATTR_NAME_STREAM_CYCLE_EVENT_FLAG, true)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_STREAM_CYCLE_EVENT_FLAG.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Op: %s set ATTR_NAME_STREAM_CYCLE_EVENT_FLAG failed", node->GetName().c_str());
return FAILED;
}
@@ -90,6 +96,8 @@ Status SetActiveLabelList(const ge::NodePtr &node, const std::vector<std::string
OpDescPtr tmp_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(tmp_desc);
if (!AttrUtils::SetListStr(tmp_desc, ge::ATTR_NAME_ACTIVE_LABEL_LIST, active_label_list)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Op: %s set ATTR_NAME_ACTIVE_LABEL_LIST failed", node->GetName().c_str());
return FAILED;
}
@@ -108,6 +116,8 @@ Status SetSwitchBranchNodeLabel(const ge::NodePtr &node, const std::string &bran
OpDescPtr tmp_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(tmp_desc);
if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_SWITCH_BRANCH_NODE_LABEL, branch_label)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_SWITCH_BRANCH_NODE_LABEL.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Op: %s set ATTR_NAME_SWITCH_BRANCH_NODE_LABEL failed", node->GetName().c_str());
return FAILED;
}
@@ -126,6 +136,8 @@ Status SetSwitchTrueBranchFlag(const ge::NodePtr &node, bool value) {
OpDescPtr tmp_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(tmp_desc);
if (!AttrUtils::SetBool(tmp_desc, ge::ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG, value)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Op: %s set ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG failed", node->GetName().c_str());
return FAILED;
}
@@ -144,6 +156,8 @@ Status SetOriginalNodeName(const ge::NodePtr &node, const std::string &orig_name
OpDescPtr tmp_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(tmp_desc);
if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_ORIG_NODE_NAME, orig_name)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_ORIG_NODE_NAME.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Op: %s set ATTR_NAME_ORIG_NODE_NAME failed", node->GetName().c_str());
return FAILED;
}
@@ -161,6 +175,8 @@ Status SetCyclicDependenceFlag(const ge::NodePtr &node) {
OpDescPtr tmp_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(tmp_desc);
if (!AttrUtils::SetBool(tmp_desc, ge::ATTR_NAME_CYCLIC_DEPENDENCE_FLAG, true)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_CYCLIC_DEPENDENCE_FLAG.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Op: %s set ATTR_NAME_CYCLIC_DEPENDENCE_FLAG failed", node->GetName().c_str());
return FAILED;
}
@@ -180,6 +196,8 @@ Status SetNextIteration(const ge::NodePtr &node, const std::string &next) {
GE_CHECK_NOTNULL(tmp_desc);

if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_NEXT_ITERATION, next)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_NEXT_ITERATION.c_str(),
node->GetName().c_str(), node->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Op: %s set ATTR_NAME_NEXT_ITERATION failed", node->GetName().c_str());
return FAILED;
}
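Every hunk in this file repeats the same set-attribute-or-report shape. A hypothetical helper capturing it (name and factoring are illustrative, not part of the patch):

// Hypothetical helper: set one string attribute and, on failure, emit the same
// REPORT_INNER_ERROR/GELOGE pair used inline above.
static Status SetStrAttrOrReport(const OpDescPtr &desc, const std::string &attr,
                                 const std::string &value, const char *caller) {
  if (!AttrUtils::SetStr(desc, attr, value)) {
    REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", attr.c_str(),
                       desc->GetName().c_str(), desc->GetType().c_str(), caller);
    GELOGE(FAILED, "Op: %s set %s failed", desc->GetName().c_str(), attr.c_str());
    return FAILED;
  }
  return SUCCESS;
}

// Usage sketch: return SetStrAttrOrReport(node->GetOpDesc(), ATTR_NAME_STREAM_LABEL, label, __FUNCTION__);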


+ 30
- 0
ge/graph/execute/graph_execute.cc

@@ -40,6 +40,7 @@ GraphExecutor::~GraphExecutor() {
rtError_t rt_ret;
rt_ret = rtFreeHost(buffer_addr);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtFreeHost fail, ret:0x%X when %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "[GraphManager] subgraph free buffer failed, ret: 0x%X", rt_ret);
}
}
@@ -51,14 +52,17 @@ GraphExecutor::~GraphExecutor() {
Status GraphExecutor::SetCondition(std::mutex *mutex, std::condition_variable *cond,
std::shared_ptr<GraphModelListener> listener) {
if (mutex == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param mutex nullptr when %s", __FUNCTION__);
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[SetCondition] input param mutex is nullptr.");
return GE_GRAPH_PARAM_NULLPTR;
}
if (cond == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param cond nullptr when %s", __FUNCTION__);
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[SetCondition] input param cond is nullptr.");
return GE_GRAPH_PARAM_NULLPTR;
}
if (listener == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param listener nullptr when %s", __FUNCTION__);
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[SetCondition] input param listener is nullptr.");
return GE_GRAPH_PARAM_NULLPTR;
}
@@ -75,6 +79,7 @@ Status GraphExecutor::SetCondition(std::mutex *mutex, std::condition_variable *c

Status GraphExecutor::SetGraphContext(GraphContextPtr graph_context_ptr) {
if (graph_context_ptr == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param graph_context_ptr nullptr when %s", __FUNCTION__);
GELOGE(GE_GRAPH_PARAM_NULLPTR, "[SetGraphContext] input param graph_context_ptr is nullptr");
return GE_GRAPH_PARAM_NULLPTR;
}
@@ -101,6 +106,7 @@ Status GraphExecutor::FreeInOutBuffer() {
rtError_t rt_ret;
rt_ret = rtFreeHost(*iter);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtFreeHost fail, ret:0x%X when %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "[GraphManager] subgraph free buffer failed, ret: 0x%X", rt_ret);
(void)buffer_addr_.erase(buffer_addr_.begin(), iter);
return GE_GRAPH_FREE_FAILED;
@@ -146,6 +152,8 @@ Status GraphExecutor::MallocInOutBuffer(const std::vector<uint64_t> &buffer_size
void *tmp_buf = nullptr;
rt_ret = rtMallocHost(&tmp_buf, buffer_size[i]);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMallocHost fail, size:%lu, ret:0x%X when %s",
buffer_size[i], rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "[GraphManager] subgraph malloc buffer failed, ret: 0x%X", rt_ret);
return GE_GRAPH_MALLOC_FAILED;
}
@@ -191,6 +199,8 @@ Status GraphExecutor::PrepareInputData(const std::vector<GeTensor> &input_tensor
rtError_t rt_ret = rtMemcpy(addrVec[i], bufferSizeVec[i], in_tensor->GetData().data(),
in_tensor->GetData().size(), RT_MEMCPY_HOST_TO_HOST);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, dst_size:%lu, src_size:%zu, ret:0x%X when %s",
bufferSizeVec[i], in_tensor->GetData().size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_FAILED;
}
@@ -250,6 +260,8 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
}

if (graph_run_listener_->ResetResult() != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call graph_run_listener_.ResetResult fail, model_id:%u, when %s",
model_id, __FUNCTION__);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "Reset result failed");
return GE_GRAPH_EXECUTE_FAILED;
}
@@ -273,6 +285,8 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
// Run graph return
uint32_t result_code = graph_run_listener_->GetResultCode();
if (result_code != SUCCESS && result_code != END_OF_SEQUENCE) {
REPORT_CALL_ERROR("E19999", "Graph_run_listener_ run fail, result:%u, model_id:%u, when %s",
result_code, model_id, __FUNCTION__);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "[GraphExecutor] execute model failed, ret=%u, modelId=%u.", result_code,
model_id);
return GE_GRAPH_EXECUTE_FAILED;
@@ -281,10 +295,14 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
for (size_t i = 0; i < output_data.blobs.size(); ++i) {
DataBuffer outputDataTmp = output_data.blobs[i];
CHECK_FALSE_EXEC(outputDataTmp.length != 0,
REPORT_INNER_ERROR("E19999", "Param output_data.length is 0 in model:%u, check invalid, when %s",
model_id, __FUNCTION__);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "Failed to allocate memory, length is 0.");
return GE_GRAPH_EXECUTE_FAILED);
std::unique_ptr<uint8_t> outBufTmp(new (std::nothrow) uint8_t[outputDataTmp.length]);
if (outBufTmp == nullptr) {
REPORT_INNER_ERROR("E19999", "New output buffer fail, length:%lu, model:%u, when %s",
outputDataTmp.length, model_id, __FUNCTION__);
GELOGE(FAILED, "Failed to allocate memory.");
return FAILED;
}
@@ -292,6 +310,8 @@ Status GraphExecutor::SyncExecuteModel(uint32_t model_id, const std::vector<GeTe
rtError_t ret_value = rtMemcpy(outBufTmp.get(), outputDataTmp.length, outputDataTmp.data, outputDataTmp.length,
RT_MEMCPY_HOST_TO_HOST);
CHECK_FALSE_EXEC(ret_value == RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, dst_size:%lu, src_size:%zu, ret:0x%X when %s",
outputDataTmp.length, outputDataTmp.length, ret_value, __FUNCTION__);
GELOGE(GE_GRAPH_EXECUTE_FAILED, "Call rt api rtMemcpy failed, ret: 0x%X", ret);
return GE_GRAPH_EXECUTE_FAILED);
GeTensor outTensor;
@@ -344,6 +364,8 @@ Status GraphExecutor::ExecuteGraph(GraphId graph_id, const GeRootModelPtr &ge_ro
last_graph_id_ = graph_id;

if (!init_flag_) {
REPORT_INNER_ERROR("E19999", "No SetCondition called before, graph:%u, check invalid when %s",
graph_id, __FUNCTION__);
GELOGE(GE_GRAPH_EXECUTE_NOT_INIT, "[GraphExecutor] AI Core Engine without calling SetCondition!");
return GE_GRAPH_EXECUTE_NOT_INIT;
}
@@ -392,10 +414,12 @@ Status GraphExecutor::AsyncExecuteModel(uint32_t model_id, const std::vector<Inp

GELOGI("RunAsync success.");
} catch (std::bad_alloc &) {
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur when %s failed", __FUNCTION__);
GELOGE(MEMALLOC_FAILED, "RunAsync failed, bad memory allocation occur !");
CsaInteract::GetInstance().WriteErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
return MEMALLOC_FAILED;
} catch (...) {
REPORT_INNER_ERROR("E19999", "Some exceptions occur when %s failed", __FUNCTION__);
GELOGE(FAILED, "RunAsync failed, some exceptions occur !");
CsaInteract::GetInstance().WriteErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
return FAILED;
@@ -415,10 +439,12 @@ Status GraphExecutor::DataInput(const InputData &input_data, OutputData &output_
return ret;
}
} catch (std::bad_alloc &) {
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur when %s failed", __FUNCTION__);
GELOGE(MEMALLOC_FAILED, "DataInput failed, bad memory allocation occur !");
CsaInteract::GetInstance().WriteErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
return MEMALLOC_FAILED;
} catch (...) {
REPORT_INNER_ERROR("E19999", "Some exceptions occur when %s failed", __FUNCTION__);
GELOGE(FAILED, "DataInput failed, some exceptions occur !");
CsaInteract::GetInstance().WriteErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
return FAILED;
@@ -439,10 +465,12 @@ Status GraphExecutor::GetInputOutputDescInfo(const uint32_t model_id, vector<Inp
return ret;
}
} catch (std::bad_alloc &) {
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur when %s failed", __FUNCTION__);
GELOGE(MEMALLOC_FAILED, "GetInputOutputDescInfo failed, bad memory allocation occur !");
CsaInteract::GetInstance().WriteErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
return MEMALLOC_FAILED;
} catch (...) {
REPORT_INNER_ERROR("E19999", "Some exceptions occur when %s failed", __FUNCTION__);
GELOGE(FAILED, "GetInputOutputDescInfo failed, some exceptions occur !");
CsaInteract::GetInstance().WriteErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
return FAILED;
@@ -466,10 +494,12 @@ Status GraphExecutor::GetInputOutputDescInfo(const uint32_t model_id, vector<Inp
return ret;
}
} catch (std::bad_alloc &) {
REPORT_INNER_ERROR("E19999", "Bad memory allocation exception occur when %s failed", __FUNCTION__);
GELOGE(MEMALLOC_FAILED, "GetInputOutputDescInfo failed, bad memory allocation occur !");
CsaInteract::GetInstance().WriteErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
return MEMALLOC_FAILED;
} catch (...) {
REPORT_INNER_ERROR("E19999", "Some exceptions occur when %s failed", __FUNCTION__);
GELOGE(FAILED, "GetInputOutputDescInfo failed, some exceptions occur !");
CsaInteract::GetInstance().WriteErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
return FAILED;
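The graph_execute.cc hunks attach reports to the host staging-buffer path: rtMallocHost for allocation, rtMemcpy host-to-host for the data, rtFreeHost on teardown. A condensed sketch of that path with the new reporting (staging, src and size are illustrative names assumed in scope):

// Sketch only: allocate a host buffer, copy into it, release it, reporting the
// runtime return code on each failure as the hunks above do.
void *staging = nullptr;
rtError_t rt_ret = rtMallocHost(&staging, size);
if (rt_ret != RT_ERROR_NONE) {
  REPORT_CALL_ERROR("E19999", "Call rtMallocHost fail, size:%lu, ret:0x%X when %s",
                    size, rt_ret, __FUNCTION__);
  return GE_GRAPH_MALLOC_FAILED;
}
rt_ret = rtMemcpy(staging, size, src, size, RT_MEMCPY_HOST_TO_HOST);
if (rt_ret != RT_ERROR_NONE) {
  REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, dst_size:%lu, src_size:%lu, ret:0x%X when %s",
                    size, size, rt_ret, __FUNCTION__);
  (void)rtFreeHost(staging);
  return RT_FAILED;
}
rt_ret = rtFreeHost(staging);
if (rt_ret != RT_ERROR_NONE) {
  REPORT_CALL_ERROR("E19999", "Call rtFreeHost fail, ret:0x%X when %s", rt_ret, __FUNCTION__);
}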


+ 16
- 0
ge/graph/label/case_label_maker.cc

@@ -42,6 +42,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {

const auto graph_names = case_desc->GetSubgraphInstanceNames();
if (graph_names.empty() || graph_names.size() > kMaxCaseBranch) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) subgraph size: %zu, check invalid when %s", case_desc->GetName().c_str(),
case_desc->GetType().c_str(), graph_names.size(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Node: %s has invalid subgraph, graph size: %zu.", case_desc->GetName().c_str(),
graph_names.size());
return FAILED;
@@ -67,6 +69,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
parent_node_->GetName() + "/StreamActive_" + std::to_string(index); // rtStreamActive
NodePtr stream_active = AddStreamActive(graph, stream_active_name);
if (stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail when %s",
graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", graph->GetName().c_str());
return FAILED;
}
@@ -75,6 +79,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
std::string label_set_name = parent_node_->GetName() + "/LabelSet_" + std::to_string(index); // rtLabelSet
NodePtr label = AddLabelSetEnter(graph, label_set_name, curr_label_index, stream_active);
if (label == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail when %s",
graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", graph->GetName().c_str());
return FAILED;
}
@@ -88,6 +94,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
// middle node, add goto node to tail.
std::string label_goto_name = parent_node_->GetName() + "/LabelGoto_" + std::to_string(index); // rtLabelGoto
if (AddLabelGotoLeave(graph, label_goto_name, last_label_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelGotoLeave node in graph:%s fail when %s",
graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label goto failed.", graph->GetName().c_str());
return FAILED;
}
@@ -95,6 +103,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
// last node, add label node to tail.
std::string last_label_name = parent_node_->GetName() + "/LabelSet_Last"; // rtLabelSet
if (AddLabelSetLeave(graph, last_label_name, last_label_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetLeave node in graph:%s fail when %s",
graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", graph->GetName().c_str());
return FAILED;
}
@@ -110,12 +120,16 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
const GeTensorDesc &pred_desc = case_desc->GetInputDesc(kCasePredIndex);
NodePtr switch_node = AddLabelSwitchEnter(first_graph, label_switch_name, pred_desc, switch_labels);
if (switch_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchEnter node in graph:%s fail when %s",
first_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label switch failed.", first_graph->GetName().c_str());
return FAILED;
}

// Link control edge to then branch head.
if (GraphUtils::AddEdge(switch_node->GetOutControlAnchor(), first_label->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail when %s", switch_node->GetName().c_str(),
first_label->GetName().c_str(), first_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add ctrl edge to %s failed.", first_label->GetName().c_str());
return FAILED;
}
@@ -123,6 +137,8 @@ Status CaseOpLabelMaker::Run(uint32_t &label_index) {
uint32_t parent_index = 0; // Case cond input is first.
const std::string data_name = parent_node_->GetName() + "/SwitchIndexData";
if (AddLabelSwitchIndex(first_graph, data_name, pred_desc, switch_node, parent_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchIndex node in graph:%s fail when %s",
first_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add switch input failed.", first_graph->GetName().c_str());
return FAILED;
}
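Every helper call in this maker (AddStreamActive, AddLabelSetEnter, AddLabelGotoLeave, AddLabelSwitchEnter, AddLabelSwitchIndex) now pairs its nullptr check with a REPORT_CALL_ERROR. A hedged sketch of how that repeated check could be kept in one local helper; CheckNodeCreated is illustrative only and not part of GE:

static Status CheckNodeCreated(const NodePtr &node, const ComputeGraphPtr &graph,
                               const char *what, const char *func) {
  if (node == nullptr) {
    // same report/log pair that each call site above writes out by hand
    REPORT_CALL_ERROR("E19999", "Add %s node in graph:%s fail when %s", what, graph->GetName().c_str(), func);
    GELOGE(INTERNAL_ERROR, "Subgraph: %s add %s failed.", graph->GetName().c_str(), what);
    return FAILED;
  }
  return SUCCESS;
}

// usage sketch:
//   NodePtr stream_active = AddStreamActive(graph, stream_active_name);
//   if (CheckNodeCreated(stream_active, graph, "StreamActive", __FUNCTION__) != SUCCESS) { return FAILED; }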


+22 -0  ge/graph/label/if_label_maker.cc

@@ -43,6 +43,10 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
const std::string then_branch_name = if_desc->GetSubgraphInstanceName(kThenBranchIndex);
const std::string else_branch_name = if_desc->GetSubgraphInstanceName(kElseBranchIndex);
if (then_branch_name.empty() || else_branch_name.empty()) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s), check subgraph invalid, "
"then branch graph: %s, else branch graph: %s, when %s",
if_desc->GetName().c_str(), if_desc->GetType().c_str(),
then_branch_name.c_str(), else_branch_name.c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Node: %s has invalid subgraph, then branch: %s, else branch: %s.",
if_desc->GetName().c_str(), then_branch_name.c_str(), else_branch_name.c_str());
return FAILED;
@@ -66,32 +70,44 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {

NodePtr then_stream_active = AddStreamActive(then_sub_graph, then_active_name);
if (then_stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail when %s",
then_sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", then_sub_graph->GetName().c_str());
return FAILED;
}

NodePtr then_enter_label = AddLabelSetEnter(then_sub_graph, then_label_name, then_enter_index, then_stream_active);
if (then_enter_label == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail when %s",
then_sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", then_sub_graph->GetName().c_str());
return FAILED;
}

if (AddLabelGotoLeave(then_sub_graph, then_leave_name, else_leave_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelGotoLeave node in graph:%s fail when %s",
then_sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label goto failed.", then_sub_graph->GetName().c_str());
return FAILED;
}

NodePtr else_stream_active = AddStreamActive(else_sub_graph, else_active_name);
if (else_stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail when %s",
else_sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", else_sub_graph->GetName().c_str());
return FAILED;
}

if (AddLabelSetEnter(else_sub_graph, else_enter_name, else_enter_index, else_stream_active) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail when %s",
else_sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", else_sub_graph->GetName().c_str());
return FAILED;
}
if (AddLabelSetLeave(else_sub_graph, else_leave_name, else_leave_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetLeave node in graph:%s fail when %s",
else_sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", else_sub_graph->GetName().c_str());
return FAILED;
}
@@ -103,12 +119,16 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
const GeTensorDesc &pred_desc = if_desc->GetInputDesc(kIfPredIndex);
NodePtr switch_node = AddLabelSwitchEnter(then_sub_graph, then_enter_name, pred_desc, switch_labels);
if (switch_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchEnter node in graph:%s fail when %s",
then_sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label switch failed.", then_sub_graph->GetName().c_str());
return FAILED;
}

// Link control edge to then branch head.
if (GraphUtils::AddEdge(switch_node->GetOutControlAnchor(), then_enter_label->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail when %s", switch_node->GetName().c_str(),
then_enter_label->GetName().c_str(), then_sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add ctrl edge to %s failed.", then_enter_label->GetName().c_str());
return FAILED;
}
@@ -116,6 +136,8 @@ Status IfOpLabelMaker::Run(uint32_t &label_index) {
uint32_t parent_index = 0; // If cond input is first.
const std::string data_name = parent_node_->GetName() + "/SwitchIndexData";
if (AddLabelSwitchIndex(then_sub_graph, data_name, pred_desc, switch_node, parent_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchIndex node in graph:%s fail when %s",
then_sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add switch input failed.", then_sub_graph->GetName().c_str());
return FAILED;
}
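Read in order, the calls above lower an If node onto label primitives. A rough layout of the two subgraphs after Run() succeeds, reconstructed from the call sequence in this hunk (illustrative only):

// then_sub_graph (head first):
//   SwitchIndexData --data--> LabelSwitchByIndex(pred) --ctrl--> LabelSet(then_enter) -> StreamActive
//       -> ...then body... -> LabelGoto(else_leave_index)
// else_sub_graph:
//   LabelSet(else_enter) -> StreamActive -> ...else body... -> LabelSet(else_leave)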


+30 -0  ge/graph/label/label_maker.cc

@@ -56,6 +56,8 @@ void LabelMaker::LinkToGraphHead(const ComputeGraphPtr &graph, const NodePtr &no
}

if (GraphUtils::AddEdge(node->GetOutControlAnchor(), n->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail when %s", node->GetName().c_str(),
n->GetName().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Add ctrl edge from %s to %s failed.", node->GetName().c_str(), n->GetName().c_str());
}
}
@@ -78,6 +80,8 @@ void LabelMaker::LinkToGraphTail(const ComputeGraphPtr &graph, const NodePtr &no
}

if (GraphUtils::AddEdge(tail->GetOutControlAnchor(), node->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail when %s", tail->GetName().c_str(),
node->GetName().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Add ctrl edge from %s to %s failed.", tail->GetName().c_str(), node->GetName().c_str());
}
return;
@@ -96,6 +100,7 @@ NodePtr LabelMaker::AddStreamActive(const ComputeGraphPtr &graph, const std::str

const auto &node_list = graph->GetDirectNode();
if (node_list.empty()) {
REPORT_INNER_ERROR("E19999", "Check param graph has no node when %s", graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSet: Graph %s node is empty.", graph->GetName().c_str());
return nullptr;
}
@@ -131,6 +136,7 @@ NodePtr LabelMaker::AddLabelSetEnter(const ComputeGraphPtr &graph, const std::st

const auto &node_list = graph->GetDirectNode();
if (node_list.empty()) {
REPORT_INNER_ERROR("E19999", "Check param graph has no node when %s", graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSet: Graph %s node is empty.", graph->GetName().c_str());
return nullptr;
}
@@ -145,6 +151,8 @@ NodePtr LabelMaker::AddLabelSetEnter(const ComputeGraphPtr &graph, const std::st
GE_CHECK_NOTNULL_EXEC(label_set, return nullptr);

if (GraphUtils::AddEdge(label_set->GetOutControlAnchor(), stream_active->GetInControlAnchor()) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail when %s", label_set->GetName().c_str(),
stream_active->GetName().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Add ctrl edge from %s to %s failed.", label_set->GetName().c_str(),
stream_active->GetName().c_str());
return nullptr;
@@ -193,6 +201,7 @@ NodePtr LabelMaker::AddLabelGotoEnter(const ComputeGraphPtr &graph, const std::s
const auto &node_list = graph->GetDirectNode();
auto it = node_list.begin();
if (it == node_list.end()) {
REPORT_INNER_ERROR("E19999", "Check param graph has no node when %s", graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelGoto: Graph %s node is empty.", graph->GetName().c_str());
return nullptr;
}
@@ -205,6 +214,8 @@ NodePtr LabelMaker::AddLabelGotoEnter(const ComputeGraphPtr &graph, const std::s
(void)AttrUtils::SetInt(op_desc, ATTR_NAME_LABEL_SWITCH_INDEX, index);
NodePtr label_goto = graph->AddNodeFront(op_desc);
if (label_goto == nullptr) {
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s fail when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelGoto: Add to graph %s failed.", graph->GetName().c_str());
return nullptr;
}
@@ -253,6 +264,7 @@ NodePtr LabelMaker::AddLabelSwitchEnter(const ComputeGraphPtr &graph, const std:
const auto &node_list = graph->GetDirectNode();
auto it = node_list.begin();
if (it == node_list.end()) {
REPORT_INNER_ERROR("E19999", "Check param graph has no node when %s", graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Graph %s node is empty.", graph->GetName().c_str());
return nullptr;
}
@@ -263,17 +275,23 @@ NodePtr LabelMaker::AddLabelSwitchEnter(const ComputeGraphPtr &graph, const std:

GELOGI("LabelSwitchByIndex: Create node %s.", op_desc->GetName().c_str());
if (op_desc->AddInputDesc(desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add input desc into node:%s(%s) in graph:%s fail when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add input desc failed.");
return nullptr;
}

if (!AttrUtils::SetListInt(op_desc, ATTR_NAME_LABEL_SWITCH_LIST, labels)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_LABEL_SWITCH_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add %s failed.", ATTR_NAME_LABEL_SWITCH_INDEX.c_str());
return nullptr;
}

NodePtr label_switch = graph->AddNodeFront(op_desc);
if (label_switch == nullptr) {
REPORT_CALL_ERROR("E19999", "Add node:%s(%s) to graph:%s ahead fail when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add to graph %s failed.", graph->GetName().c_str());
return nullptr;
}
@@ -300,11 +318,15 @@ NodePtr LabelMaker::AddLabelSwitchLeave(const ComputeGraphPtr &graph, const std:

GELOGI("LabelSwitchByIndex: Create node %s.", op_desc->GetName().c_str());
if (op_desc->AddInputDesc(desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add input desc into node:%s(%s) in graph:%s fail when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add input desc failed.");
return nullptr;
}

if (!AttrUtils::SetListInt(op_desc, ATTR_NAME_LABEL_SWITCH_LIST, labels)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_LABEL_SWITCH_LIST.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add %s failed.", ATTR_NAME_LABEL_SWITCH_INDEX.c_str());
return nullptr;
}
@@ -336,15 +358,21 @@ NodePtr LabelMaker::AddLabelSwitchIndex(const ComputeGraphPtr &graph, const std:

GELOGI("Data: Create node %s.", op_desc->GetName().c_str());
if (op_desc->AddInputDesc(desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add input desc into node:%s(%s) in graph:%s fail when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add data input desc failed.");
return nullptr;
}
if (op_desc->AddOutputDesc(desc) != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add output desc into node:%s(%s) in graph:%s fail when %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add data output desc failed.");
return nullptr;
}

if (!AttrUtils::SetInt(op_desc, ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s) when %s", ATTR_NAME_PARENT_NODE_INDEX.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add %s failed.", ATTR_NAME_PARENT_NODE_INDEX.c_str());
return nullptr;
}
@@ -354,6 +382,8 @@ NodePtr LabelMaker::AddLabelSwitchIndex(const ComputeGraphPtr &graph, const std:

// Link the switch index Data output to the switch node input.
if (GraphUtils::AddEdge(op_data->GetOutDataAnchor(0), sw_node->GetInDataAnchor(0)) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail when %s", op_data->GetName().c_str(),
sw_node->GetName().c_str(), graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "LabelSwitchByIndex: Add input edge to %s failed.", op_data->GetName().c_str());
return nullptr;
}
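AddLabelSwitchIndex builds the Data node that feeds the switch: one input and one output desc, ATTR_NAME_PARENT_NODE_INDEX set to the parent's cond input, then a data edge into input 0 of the switch node. A short usage sketch from the caller's side; parent_desc and the label ids are illustrative stand-ins:

const GeTensorDesc &pred_desc = parent_desc->GetInputDesc(0);   // predicate tensor of the parent node
const std::vector<uint32_t> switch_labels = {1U, 2U};           // branch label ids, illustrative
NodePtr switch_node = AddLabelSwitchEnter(graph, "LabelSwitch", pred_desc, switch_labels);
GE_CHECK_NOTNULL_EXEC(switch_node, return FAILED);
uint32_t parent_index = 0;                                      // cond input is first on the parent node
if (AddLabelSwitchIndex(graph, "SwitchIndexData", pred_desc, switch_node, parent_index) == nullptr) {
  return FAILED;
}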


+7 -0  ge/graph/label/partitioned_call_label_maker.cc

@@ -39,12 +39,17 @@ Status PartitionedCallLabelMaker::Run(uint32_t &label_index) {

std::string sub_graph_name = call_desc->GetSubgraphInstanceName(kSubGraphIndex);
if (sub_graph_name.empty()) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) subgraph_index:%d name is empty, check invalid when %s",
call_desc->GetName().c_str(), call_desc->GetType().c_str(), kSubGraphIndex, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Node: %s has no subgraph name.", sub_graph_name.c_str());
return FAILED;
}

ComputeGraphPtr sub_graph = parent_graph_->GetSubgraph(sub_graph_name);
if (sub_graph == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) subgraph_name:%s is not exist in parent_graph, check invalid when %s",
call_desc->GetName().c_str(), call_desc->GetType().c_str(),
sub_graph_name.c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Node: %s has no subgraph.", sub_graph_name.c_str());
return FAILED;
}
@@ -52,6 +57,8 @@ Status PartitionedCallLabelMaker::Run(uint32_t &label_index) {
const std::string stream_active_name = parent_node_->GetName() + "/StreamActive"; // rtStreamActive
NodePtr stream_active = AddStreamActive(sub_graph, stream_active_name);
if (stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail when %s",
sub_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active node failed.", sub_graph->GetName().c_str());
return FAILED;
}
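PartitionedCall only needs its single subgraph activated, so this maker just resolves the instance name and prepends one StreamActive node. A condensed sketch of that sequence with the error reporting trimmed (identifiers taken from the hunk above):

std::string sub_graph_name = call_desc->GetSubgraphInstanceName(kSubGraphIndex);
ComputeGraphPtr sub_graph = parent_graph_->GetSubgraph(sub_graph_name);
GE_CHECK_NOTNULL_EXEC(sub_graph, return FAILED);
NodePtr stream_active = AddStreamActive(sub_graph, parent_node_->GetName() + "/StreamActive");
GE_CHECK_NOTNULL_EXEC(stream_active, return FAILED);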


+20 -0  ge/graph/label/while_label_maker.cc

@@ -44,6 +44,9 @@ Status WhileOpLabelMaker::Run(uint32_t &label_index) {
std::string cond_name = while_desc->GetSubgraphInstanceName(kCondBranchIndex);
std::string body_name = while_desc->GetSubgraphInstanceName(kBodyBranchIndex);
if (cond_name.empty() || body_name.empty()) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) cond subgraph index:%d or body subgraph index:%d name is empty, "
"check invalid when %s", while_desc->GetName().c_str(), while_desc->GetType().c_str(),
kCondBranchIndex, kBodyBranchIndex, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Node: %s has invalid subgraph, cond branch: %s, body branch: %s.",
while_desc->GetName().c_str(), cond_name.c_str(), body_name.c_str());
return FAILED;
@@ -67,32 +70,44 @@ Status WhileOpLabelMaker::Run(uint32_t &label_index) {

NodePtr cond_stream_active = AddStreamActive(cond_graph, cond_active_name);
if (cond_stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail when %s",
cond_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", cond_graph->GetName().c_str());
return FAILED;
}

if (AddLabelSetEnter(cond_graph, cond_enter_name, cond_enter_index, cond_stream_active) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail when %s",
cond_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", cond_graph->GetName().c_str());
return FAILED;
}

NodePtr body_stream_active = AddStreamActive(body_graph, body_active_name);
if (body_stream_active == nullptr) {
REPORT_CALL_ERROR("E19999", "Add StreamActive node in graph:%s fail when %s",
body_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add stream active failed.", body_graph->GetName().c_str());
return FAILED;
}

if (AddLabelSetEnter(body_graph, body_enter_name, body_enter_index, body_stream_active) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetEnter node in graph:%s fail when %s",
body_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", body_graph->GetName().c_str());
return FAILED;
}

if (AddLabelGotoLeave(body_graph, goto_leave_name, cond_enter_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelGotoLeave node in graph:%s fail when %s",
body_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label goto failed.", body_graph->GetName().c_str());
return FAILED;
}

if (AddLabelSetLeave(body_graph, body_leave_name, body_leave_index) == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSetLeave node in graph:%s fail when %s",
body_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label set failed.", body_graph->GetName().c_str());
return FAILED;
}
@@ -109,6 +124,8 @@ Status WhileOpLabelMaker::Run(uint32_t &label_index) {
const std::vector<uint32_t> switch_labels = {body_leave_index, body_enter_index};
NodePtr switch_node = AddLabelSwitchLeave(cond_graph, cond_leave_name, pred_desc, switch_labels);
if (switch_node == nullptr) {
REPORT_CALL_ERROR("E19999", "Add LabelSwitchLeave node in graph:%s fail when %s",
cond_graph->GetName().c_str(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Subgraph: %s add label switch failed.", cond_graph->GetName().c_str());
return FAILED;
}
@@ -124,6 +141,9 @@ Status WhileOpLabelMaker::Run(uint32_t &label_index) {
InDataAnchorPtr in_anchor = all_in_data.at(kCondOutputIndex);
GE_CHECK_NOTNULL(in_anchor);
if (GraphUtils::AddEdge(in_anchor->GetPeerOutAnchor(), switch_node->GetInDataAnchor(kCondOutputIndex)) != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Add ctrl edge from %s to %s in graph:%s fail when %s",
in_anchor->GetPeerOutAnchor()->GetOwnerNode()->GetName().c_str(),
switch_node->GetName().c_str(), cond_graph->GetName().c_str(), __FUNCTION__);
GELOGE(FAILED, "Node: %s Add pred data input failed.", switch_node->GetName().c_str());
return FAILED;
}
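The While lowering mirrors the If case but loops back: the body graph ends with a LabelGoto to the cond entry label, and the cond graph ends with a LabelSwitchLeave on the predicate that either re-enters the body or leaves the loop. A rough layout reconstructed from the call order above (illustrative only):

// cond_graph: LabelSet(cond_enter) -> StreamActive -> ...cond body...
//             -> LabelSwitchLeave(pred, {body_leave_index, body_enter_index})
// body_graph: LabelSet(body_enter) -> StreamActive -> ...body...
//             -> LabelGoto(cond_enter_index) -> LabelSet(body_leave)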


+8 -7  ge/graph/load/model_manager/davinci_model.cc

@@ -3683,33 +3683,34 @@ Status DavinciModel::NnExecute(rtStream_t stream, bool async_mode, const InputDa
GE_CHK_STATUS_RET(InitModelStream(stream), "Init model stream failed.");
is_dynamic_ = input_data.is_dynamic_batch;

GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_PRE_PROC_START));
bool profiling_model_execute_on = ProfilingManager::Instance().ProfilingModelExecuteOn();
GE_IF_BOOL_EXEC(profiling_model_execute_on, SetProfileTime(MODEL_PRE_PROC_START));
Status ret = CopyModelData(input_data, output_data, is_dynamic_);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ret, "Copy input data to model failed. model id: %u",
model_id_);

GELOGD("current_data.index=%u", input_data.index);
GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_PRE_PROC_END));
GE_IF_BOOL_EXEC(profiling_model_execute_on, SetProfileTime(MODEL_PRE_PROC_END));

if (!task_list_.empty()) {
GELOGD("rtModelExecute do");
GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_INFER_START));
GE_IF_BOOL_EXEC(profiling_model_execute_on, SetProfileTime(MODEL_INFER_START));
rtError_t rt_ret = rtModelExecute(rt_model_handle_, rt_model_stream_, 0);
GE_CHK_RT_EXEC(rt_ret, return RT_ERROR_TO_GE_STATUS(rt_ret));
GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_INFER_END));
GE_IF_BOOL_EXEC(profiling_model_execute_on, SetProfileTime(MODEL_INFER_END));
GELOGD("rtModelExecute end");
}

if (!is_async_mode_) {
GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_AFTER_PROC_START));
GE_IF_BOOL_EXEC(profiling_model_execute_on, SetProfileTime(MODEL_AFTER_PROC_START));
ret = CopyOutputData(input_data.index, output_data, RT_MEMCPY_DEVICE_TO_DEVICE);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ACL_ERROR_GE_INTERNAL_ERROR,
"Copy Output data to user failed.");
GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_AFTER_PROC_END));
GE_IF_BOOL_EXEC(profiling_model_execute_on, SetProfileTime(MODEL_AFTER_PROC_END));
}

// report model time data
GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), (void)SinkTimeProfile(input_data));
GE_IF_BOOL_EXEC(profiling_model_execute_on, (void)SinkTimeProfile(input_data));
GELOGD("Model run end, model id:%u", model_id_);
return SUCCESS;
}
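The only change in this hunk is hoisting the repeated ProfilingManager query into a local flag so every GE_IF_BOOL_EXEC reads the cached value instead of re-querying the singleton. A minimal sketch of the hoist with the work in between elided:

bool profiling_model_execute_on = ProfilingManager::Instance().ProfilingModelExecuteOn();
GE_IF_BOOL_EXEC(profiling_model_execute_on, SetProfileTime(MODEL_PRE_PROC_START));
// ... copy inputs, execute the model, copy outputs ...
GE_IF_BOOL_EXEC(profiling_model_execute_on, (void)SinkTimeProfile(input_data));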


+4 -0  ge/graph/load/model_manager/task_info/end_graph_task_info.cc

@@ -27,6 +27,7 @@ namespace ge {
Status EndGraphTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GELOGI("InitEndGraphTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when EndGraphTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model is null!");
return PARAM_INVALID;
}
@@ -52,6 +53,7 @@ Status EndGraphTaskInfo::Distribute() {
GELOGI("Start to call rtEndGraphEx");
rtError_t rt_ret = rtEndGraphEx(model_, stream_, kDumpFlag);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtEndGraphEx fail ret:0x%X, when EndGraphTaskInfo %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rtEndGraphEx failed, ret: 0x%x", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -59,6 +61,7 @@ Status EndGraphTaskInfo::Distribute() {
GELOGI("Start to call rtEndGraph");
rtError_t rt_ret = rtEndGraph(model_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtEndGraph fail ret:0x%X, when EndGraphTaskInfo %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rtEndGraph failed, ret: 0x%x", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -68,6 +71,7 @@ Status EndGraphTaskInfo::Distribute() {
uint32_t stream_id = 0;
rtError_t rt_ret = rtModelGetTaskId(davinci_model_->GetRtModelHandle(), &task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId fail ret:0x%X, when EndGraphTaskInfo %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
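This file and the task_info files below repeat the same four-step runtime-call pattern: call the rt API, REPORT_CALL_ERROR with the return code, GELOGE, then convert the code with RT_ERROR_TO_GE_STATUS. A hedged sketch of a local macro that would capture it; GE_CHK_RT_REPORT is hypothetical and not a GE macro:

#define GE_CHK_RT_REPORT(expr, api_name, ctx)                                        \
  do {                                                                               \
    rtError_t _rt = (expr);                                                          \
    if (_rt != RT_ERROR_NONE) {                                                      \
      REPORT_CALL_ERROR("E19999", "Call %s fail ret:0x%X, when %s %s", (api_name),   \
                        _rt, (ctx), __FUNCTION__);                                   \
      GELOGE(RT_FAILED, "Call %s failed, ret: 0x%X", (api_name), _rt);               \
      return RT_ERROR_TO_GE_STATUS(_rt);                                             \
    }                                                                                 \
  } while (false)

// usage sketch: GE_CHK_RT_REPORT(rtEndGraphEx(model_, stream_, kDumpFlag), "rtEndGraphEx", "EndGraphTaskInfo");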


+4 -0  ge/graph/load/model_manager/task_info/event_record_task_info.cc

@@ -23,6 +23,7 @@ namespace ge {
Status EventRecordTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GELOGI("EventRecordTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when EventRecordTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model is null!");
return PARAM_INVALID;
}
@@ -34,6 +35,8 @@ Status EventRecordTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *da

const auto &eventList = davinci_model->GetEventList();
if (task_def.event_id() >= eventList.size()) {
REPORT_INNER_ERROR("E19999", "Task event_id:%u > model event size:%zu, check invalid when EventRecordTaskInfo %s",
task_def.event_id(), eventList.size(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "event list size:%zu, cur:%u!", eventList.size(), task_def.event_id());
return INTERNAL_ERROR;
}
@@ -47,6 +50,7 @@ Status EventRecordTaskInfo::Distribute() {
GELOGI("EventRecordTaskInfo Distribute Start.");
rtError_t rt_ret = rtEventRecord(event_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtEventRecord fail ret:0x%X, when EventRecordTaskInfo %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}


+7 -0  ge/graph/load/model_manager/task_info/event_wait_task_info.cc

@@ -23,6 +23,7 @@ namespace ge {
Status EventWaitTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GELOGI("EventWaitTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when EventWaitTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model is null!");
return PARAM_INVALID;
}
@@ -34,6 +35,8 @@ Status EventWaitTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davi

const auto &eventList = davinci_model->GetEventList();
if (task_def.event_id() >= eventList.size()) {
REPORT_INNER_ERROR("E19999", "Task event_id:%u > model event size:%zu, check invalid when EventWaitTaskInfo %s",
task_def.event_id(), eventList.size(), __FUNCTION__);
GELOGE(INTERNAL_ERROR, "event list size:%zu, cur:%u!", eventList.size(), task_def.event_id());
return INTERNAL_ERROR;
}
@@ -48,12 +51,16 @@ Status EventWaitTaskInfo::Distribute() {
GELOGI("EventWaitTaskInfo Distribute Start.");
rtError_t rt_ret = rtStreamWaitEvent(stream_, event_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent fail ret:0x%X, when EventWaitTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtEventReset(event_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtEventReset fail ret:0x%X, when EventWaitTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}


+3 -0  ge/graph/load/model_manager/task_info/fusion_start_task_info.cc

@@ -23,6 +23,7 @@ namespace ge {
Status FusionStartTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GELOGI("FusionStartTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when FusionStartTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model is null!");
return PARAM_INVALID;
}
@@ -39,6 +40,8 @@ Status FusionStartTaskInfo::Distribute() {
GELOGI("FusionStartTaskInfo Distribute Start.");
rtError_t rt_ret = rtKernelFusionStart(stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelFusionStart fail ret:0x%X, when FusionStartTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}


+3 -0  ge/graph/load/model_manager/task_info/fusion_stop_task_info.cc

@@ -23,6 +23,7 @@ namespace ge {
Status FusionStopTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GELOGI("FusionStopTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when FusionStopTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model is null!");
return PARAM_INVALID;
}
@@ -39,6 +40,8 @@ Status FusionStopTaskInfo::Distribute() {
GELOGI("FusionStopTaskInfo Distribute Start.");
rtError_t rt_ret = rtKernelFusionEnd(stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelFusionEnd fail ret:0x%X, when FusionStopTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}


+20 -0  ge/graph/load/model_manager/task_info/hccl_task_info.cc

@@ -30,6 +30,7 @@ HcclTaskInfo::~HcclTaskInfo() {
if (private_def_ != nullptr) {
rtError_t ret = rtFreeHost(private_def_);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtFreeHost fail ret:0x%X, when HcclTaskInfo %s", ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rtFree Fail, ret = 0x%X.", ret);
}
private_def_ = nullptr;
@@ -41,6 +42,7 @@ HcclTaskInfo::~HcclTaskInfo() {
Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) {
GELOGI("HcclTaskInfo Init Start.");
if (davinci_model == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when HcclTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model is null!");
return PARAM_INVALID;
}
@@ -67,22 +69,30 @@ Status HcclTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_m
// Only in Horovod scenario should get the inputName and GeShape
ret = HcomOmeUtil::GetHorovodInputs(op_desc, kernel_hccl_infos_);
if (ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call GetHorovodInputs fail for op:%s(%s), when HcclTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(ret, "davinci_model: GetHorovodInputs fail! domi error: %u", ret);
return ret;
}
Status dmrt = HcomOmeUtil::GetHcclDataType(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call GetHcclDataType fail for op:%s(%s), when HcclTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(dmrt, "davinci_model: GetHcomDataType fail! domi error: %u", dmrt);
return dmrt;
}
dmrt = HcomOmeUtil::GetHcclCount(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call GetHcclCount fail for op:%s(%s), when HcclTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(dmrt, "davinci_model: GetHcomCount fail! domi error: %u", dmrt);
return dmrt;
}
// Only HCOMBROADCAST and HVDCALLBACKBROADCAST need to get the rootId
dmrt = HcomOmeUtil::GetAllRootId(op_desc, kernel_hccl_infos_);
if (dmrt != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call GetAllRootId fail for op:%s(%s), when HcclTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(dmrt, "davinci_model: Get rootId fail! domi error: %u", dmrt);
return dmrt;
}
@@ -169,12 +179,16 @@ Status HcclTaskInfo::CreateStream(int64_t stream_num, DavinciModel *davinci_mode
rtError_t rt_ret =
rtStreamCreateWithFlags(&stream, davinci_model->Priority(), RT_STREAM_PERSISTENT | RT_STREAM_FORCE_COPY);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtStreamCreateWithFlags fail ret:0x%X, stream_idx:%ld, stream_num:%ld, "
"when HcclTaskInfo %s", rt_ret, i, stream_num, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
// Create slave stream, inactive by default, activated by hccl
rt_ret = rtModelBindStream(davinci_model->GetRtModelHandle(), stream, RT_MODEL_WAIT_ACTIVE_STREAM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelBindStream fail ret:0x%X, stream_idx:%ld, stream_num:%ld, "
"when HcclTaskInfo %s", rt_ret, i, stream_num, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
(void)rtStreamDestroy(stream);
return RT_ERROR_TO_GE_STATUS(rt_ret);
@@ -192,6 +206,7 @@ Status HcclTaskInfo::CreateStream(int64_t stream_num, DavinciModel *davinci_mode
Status HcclTaskInfo::Distribute() {
GELOGI("HcclTaskInfo Distribute Start. begin to call function LoadTask in hccl.");
if (ops_kernel_store_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param ops_kernel_store_ nullptr when HcclTaskInfo %s", __FUNCTION__);
GELOGE(INTERNAL_ERROR, "ops kernel store is null.");
return INTERNAL_ERROR;
}
@@ -201,6 +216,7 @@ Status HcclTaskInfo::Distribute() {
TransToGETaskInfo(ge_task);
auto result = ops_kernel_info_store->LoadTask(ge_task);
if (result != HCCL_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call ops_kernel_info_store LoadTask fail when HcclTaskInfo %s", __FUNCTION__);
GELOGE(INTERNAL_ERROR, "davinci_model : load task fail, return ret: %u", result);
return INTERNAL_ERROR;
}
@@ -316,6 +332,8 @@ void HcclTaskInfo::GetPrivateDefByTaskDef(const domi::TaskDef &task) {
private_def_len_ = private_def_temp.size();
rtError_t ret = rtMallocHost(&private_def_, private_def_len_);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMallocHost fail ret:0x%X, size:%u, when HcclTaskInfo %s",
ret, private_def_len_, __FUNCTION__);
GELOGE(RT_FAILED, "Call rtMallocHost Fail, ret = 0x%X.", ret);
return;
}
@@ -323,6 +341,8 @@ void HcclTaskInfo::GetPrivateDefByTaskDef(const domi::TaskDef &task) {
ret = rtMemcpy(private_def_, private_def_len_, task.private_def().c_str(), private_def_len_,
RT_MEMCPY_HOST_TO_HOST);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail ret:0x%X, size:%u, when HcclTaskInfo %s",
ret, private_def_len_, __FUNCTION__);
GELOGE(RT_FAILED, "Call rtMemcpy Fail, ret = 0x%X.", ret);
return;
}
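CreateStream allocates the slave streams one at a time and binds each to the model in wait-active state; if the bind fails, the stream just created is destroyed before returning. A condensed sketch of the loop body with the error reporting trimmed (identifiers taken from the hunk above):

for (int64_t i = 0; i < stream_num; ++i) {
  rtStream_t stream = nullptr;
  rtError_t rt_ret =
      rtStreamCreateWithFlags(&stream, davinci_model->Priority(), RT_STREAM_PERSISTENT | RT_STREAM_FORCE_COPY);
  if (rt_ret != RT_ERROR_NONE) {
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  // slave stream is created inactive and activated later by hccl
  rt_ret = rtModelBindStream(davinci_model->GetRtModelHandle(), stream, RT_MODEL_WAIT_ACTIVE_STREAM);
  if (rt_ret != RT_ERROR_NONE) {
    (void)rtStreamDestroy(stream);  // do not leak the stream that failed to bind
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
}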


+67 -7  ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc

@@ -75,11 +75,15 @@ Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe
}
auto rt_ret = rtMalloc(&ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%zu, ret:0x%X, when KernelExTaskInfo %s",
ext_info.size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtMemcpy(ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle->GetExtInfo(),
ext_handle->GetExtInfoLen(), RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%zu, ret:0x%X, when KernelExTaskInfo %s",
ext_handle->GetExtInfoLen(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);)
return SUCCESS;
@@ -101,6 +105,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
uint32_t op_index = kernel_ex_def.op_index();
OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u when KernelExTaskInfo %s",
op_index, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Init aicpu task info error, index is out of range!");
return INTERNAL_ERROR;
}
@@ -108,6 +114,9 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
// 2. Reconstruct kernelExDef.args to STR_FWK_OP_KERNEL
STR_FWK_OP_KERNEL fwk_op_kernel = {0};
if (sizeof(STR_FWK_OP_KERNEL) < kernel_ex_def.args_size()) {
REPORT_INNER_ERROR("E19999", "Param kernel_ex_def.args_size():%u > sizeof(STR_FWK_OP_KERNEL):%zu, "
"check invalid when KernelExTaskInfo %s", kernel_ex_def.args_size(), sizeof(STR_FWK_OP_KERNEL),
__FUNCTION__);
GELOGE(FAILED, "sizeof STR_FWK_OP_KERNEL is: %zu, but args_size is: %u", sizeof(STR_FWK_OP_KERNEL),
kernel_ex_def.args_size());
return FAILED;
@@ -115,6 +124,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
errno_t sec_ret =
memcpy_s(&fwk_op_kernel, sizeof(STR_FWK_OP_KERNEL), kernel_ex_def.args().data(), kernel_ex_def.args_size());
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%zu, ret:0x%X, when KernelExTaskInfo %s",
sizeof(STR_FWK_OP_KERNEL), sec_ret, __FUNCTION__);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
return FAILED;
}
@@ -136,12 +147,17 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
uint64_t kernel_id = fwk_op_kernel.fwkKernelBase.fwk_kernel.kernelID;
GE_IF_BOOL_EXEC(ModelManager::GetInstance()->CreateAicpuKernel(session_id, davinci_model->Id(),
davinci_model->SubModelId(), kernel_id) != SUCCESS,
REPORT_CALL_ERROR("E19999", "CreateAicpuKernel fail, session_id:%lu, model_id:%u, kernel_id:%lu "
"when KernelExTaskInfo %s",
session_id, davinci_model->Id(), kernel_id, __FUNCTION__);
GELOGE(FAILED, "CreateAicpuKernel error.");
return FAILED;)
// 2.3 Create session
GE_CHECK_NOTNULL(ModelManager::GetInstance());
ret = ModelManager::GetInstance()->CreateAicpuSession(session_id);
GE_IF_BOOL_EXEC(ret != SUCCESS,
REPORT_CALL_ERROR("E19999", "CreateAicpuSession fail, session_id:%lu when KernelExTaskInfo %s",
session_id, __FUNCTION__);
GELOGE(ret, "CreateAicpuSession error. session id: %lu", session_id);
return ret;)

@@ -152,7 +168,10 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
static_cast<uint64_t>(reinterpret_cast<uintptr_t>(input_output_addr));
void *workspace_base_addr = nullptr;
rtError_t rt_ret = rtMalloc(&workspace_base_addr, kernel_ex_def.task_info_size(), RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMalloc error, ret: Ox%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%u, ret:0x%X, when KernelExTaskInfo %s",
kernel_ex_def.task_info_size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtMalloc error, ret: Ox%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret););
rt_ret = rtMemcpy(workspace_base_addr, kernel_ex_def.task_info_size(), kernel_ex_def.task_info().data(),
kernel_ex_def.task_info_size(), RT_MEMCPY_HOST_TO_DEVICE);
@@ -163,12 +182,18 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
fwk_op_kernel.fwkKernelBase.fwk_kernel.extInfoAddr = reinterpret_cast<uintptr_t>(ext_info_addr_);

rt_ret = rtMalloc(&kernel_buf_, kernel_buf_size_, RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMalloc error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail ret:0x%X, size:%u, when KernelExTaskInfo %s",
rt_ret, kernel_buf_size_, __FUNCTION__);
GELOGE(RT_FAILED, "rtMalloc error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

rt_ret = rtMemcpy(kernel_buf_, kernel_buf_size_, static_cast<void *>(&fwk_op_kernel), kernel_buf_size_,
RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail ret:0x%X, size:%u, when KernelExTaskInfo %s",
rt_ret, kernel_buf_size_, __FUNCTION__);
GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

SetIoAddrs(op_desc);
@@ -186,6 +211,8 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin

const vector<void *> workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
if (workspace_data_addrs.empty()) {
REPORT_CALL_ERROR("E19999", "workspace_data_addrs is empty in op:%s(%s), check invalid when KernelExTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "workspace_data_addrs is empty.");
return FAILED;
}
@@ -200,11 +227,17 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin
auto addrs_size = sizeof(uint64_t) * (io_addrs.size());
if (addrs_size > 0) {
rtError_t rt_ret = rtMalloc(&input_output_addr_, addrs_size, RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMalloc error, ret: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail ret:0x%X, size:%lu, when KernelExTaskInfo %s",
rt_ret, addrs_size, __FUNCTION__);
GELOGE(RT_FAILED, "rtMalloc error, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

rt_ret = rtMemcpy(input_output_addr_, addrs_size, io_addrs.data(), addrs_size, RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy to input_output_addr_ error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail ret:0x%X, size:%lu, when KernelExTaskInfo %s",
rt_ret, addrs_size, __FUNCTION__);
GELOGE(RT_FAILED, "rtMemcpy to input_output_addr_ error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

InitDumpTask(input_output_addr_, op_desc);
@@ -223,12 +256,18 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin

// 4. Return result
rtError_t rt_ret = rtMalloc(&kernel_buf_, sizeof(STR_FWK_OP_KERNEL), RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMalloc error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail ret:0x%X, size:%zu, when KernelExTaskInfo %s",
rt_ret, sizeof(STR_FWK_OP_KERNEL), __FUNCTION__);
GELOGE(RT_FAILED, "rtMalloc error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

rt_ret = rtMemcpy(kernel_buf_, sizeof(STR_FWK_OP_KERNEL), static_cast<void *>(&fwk_op_kernel),
sizeof(STR_FWK_OP_KERNEL), RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail ret:0x%X, size:%zu, when KernelExTaskInfo %s",
rt_ret, sizeof(STR_FWK_OP_KERNEL), __FUNCTION__);
GELOGE(RT_FAILED, "rtMemcpy error, ret: Ox%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)

davinci_model_->SetZeroCopyAddr(op_desc, io_addrs, io_addrs.data(), input_output_addr_, addrs_size, 0);
@@ -250,6 +289,8 @@ Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciMod
uint32_t op_index = kernel_ex_def.op_index();
OpDescPtr op_desc = davinci_model->GetOpByIndex(op_index);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u when KernelExTaskInfo %s",
op_index, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "Init aicpu task info error, index is out of range!");
return INTERNAL_ERROR;
}
@@ -267,6 +308,9 @@ Status KernelExTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciMod
if (AttrUtils::GetStr(op_desc, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name) && !peer_input_name.empty()) {
uint32_t output_index = davinci_model->GetFixedAddrOutputIndex(peer_input_name);
if (output_index > outputs_size) {
REPORT_INNER_ERROR("E19999", "The output size[%zu] and output index[%u] in op:%s(%s) are inconsistent, "
"check invalid when KernelExTaskInfo %s", outputs_size, output_index,
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "The output size[%zu] and output index[%u] are inconsistent.", outputs_size, output_index);
return FAILED;
}
@@ -293,6 +337,9 @@ void KernelExTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) {
if (AttrUtils::GetStr(op_desc, ATTR_DYNAMIC_SHAPE_FIXED_ADDR, peer_input_name)) {
uint32_t output_index = davinci_model_->GetFixedAddrOutputIndex(peer_input_name);
if (output_index > output_data_addrs.size()) {
REPORT_INNER_ERROR("E19999", "The output data addr size[%zu] and output index[%u] in op:%s(%s) are inconsistent"
", check invalid when KernelExTaskInfo %s", output_data_addrs.size(), output_index,
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "The output data addr size[%zu] and output index[%u] are inconsistent.",
output_data_addrs.size(), output_index);
return;
@@ -323,17 +370,25 @@ Status KernelExTaskInfo::CopyTaskInfo(const domi::KernelExDef &kernel_def, const
const vector<int64_t> workspace_data_sizes = ModelUtils::GetWorkspaceSize(op_desc);
const vector<void *> workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
if (workspace_data_addrs.empty() || workspace_data_sizes.empty()) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) workspace addr:%zu or size:%zu empty, check invalid "
"when KernelExTaskInfo %s", op_desc->GetName().c_str(), op_desc->GetType().c_str(),
workspace_data_addrs.size(), workspace_data_sizes.size(), __FUNCTION__);
GELOGE(FAILED, "Node:%s invalid workspace, addrs is %zu, size is %zu.", op_desc->GetName().c_str(),
workspace_data_addrs.size(), workspace_data_sizes.size());
return FAILED;
}

if (workspace_data_addrs[0] == nullptr) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) workspace addr is nullptr, check invalid when KernelExTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "Node:%s workspace addrs is null.", op_desc->GetName().c_str());
return FAILED;
}

if (workspace_data_sizes[0] < static_cast<int64_t>(kernel_def.task_info_size())) {
REPORT_INNER_ERROR("E19999", "Node:%s(%s) workspace size:%ld < task info size:%d, check invalid "
"when KernelExTaskInfo %s", op_desc->GetName().c_str(), op_desc->GetType().c_str(),
workspace_data_sizes[0], kernel_def.task_info_size(), __FUNCTION__);
GELOGE(FAILED, "Node:%s workspace size is %ld, task info size is %d.", op_desc->GetName().c_str(),
workspace_data_sizes[0], kernel_def.task_info_size());
return FAILED;
@@ -342,6 +397,8 @@ Status KernelExTaskInfo::CopyTaskInfo(const domi::KernelExDef &kernel_def, const
rtError_t rt_ret = rtMemcpy(workspace_data_addrs[0], kernel_def.task_info_size(), kernel_def.task_info().data(),
kernel_def.task_info_size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail ret:0x%X, size:%d, when KernelExTaskInfo %s",
rt_ret, kernel_def.task_info_size(), __FUNCTION__);
GELOGE(RT_FAILED, "rtMemcpy error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -353,11 +410,13 @@ Status KernelExTaskInfo::Distribute() {
GELOGI("KernelExTaskInfo Distribute Start.");
rtError_t rt_ret = rtKernelLaunchEx(kernel_buf_, kernel_buf_size_, dump_flag_, stream_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchEx fail ret:0x%X when KernelExTaskInfo %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

if (davinci_model_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when KernelExTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model_ is null.");
return PARAM_INVALID;
}
@@ -366,6 +425,7 @@ Status KernelExTaskInfo::Distribute() {
uint32_t stream_id = 0; // for profiling
rt_ret = rtModelGetTaskId(davinci_model_->GetRtModelHandle(), &task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId fail ret:0x%X when KernelExTaskInfo %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
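Most of the failures reported in this file come from the same device-upload step: rtMalloc a device buffer, rtMemcpy the host data into it, and report the size on both calls. A minimal sketch of that step for the fwk_op_kernel copy (buffer and struct names taken from the hunk above):

rtError_t rt_ret = rtMalloc(&kernel_buf_, sizeof(STR_FWK_OP_KERNEL), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
  REPORT_CALL_ERROR("E19999", "Call rtMalloc fail ret:0x%X, size:%zu, when KernelExTaskInfo %s",
                    rt_ret, sizeof(STR_FWK_OP_KERNEL), __FUNCTION__);
  return RT_ERROR_TO_GE_STATUS(rt_ret);
}
rt_ret = rtMemcpy(kernel_buf_, sizeof(STR_FWK_OP_KERNEL), static_cast<void *>(&fwk_op_kernel),
                  sizeof(STR_FWK_OP_KERNEL), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
  REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail ret:0x%X, size:%zu, when KernelExTaskInfo %s",
                    rt_ret, sizeof(STR_FWK_OP_KERNEL), __FUNCTION__);
  return RT_ERROR_TO_GE_STATUS(rt_ret);
}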


+155 -2  ge/graph/load/model_manager/task_info/kernel_task_info.cc

@@ -93,8 +93,13 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
// new aicpu kernel(rtCpuKernelLaunch) no need to check function
if (kernel_type_ == ccKernelType::CCE_AI_CORE) {
rtError_t rt_ret = rtGetFunctionByName(const_cast<char *>(kernel_def.stub_func().c_str()), &stub_func_);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "execute rtGetFunctionByName failed. stub_func: %s",
kernel_def.stub_func().c_str());
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetFunctionByName fail for op:%s(%s), "
"bin_file_key:%s, ret:0x%X, when KernelTaskInfo %s",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(),
kernel_def.stub_func().c_str(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "execute rtGetFunctionByName failed. stub_func: %s",
kernel_def.stub_func().c_str());
return RT_ERROR_TO_GE_STATUS(rt_ret););
} else if (kernel_type_ == ccKernelType::TE) {
// get bin_file_key
@@ -103,11 +108,18 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
const char *bin_file_key = davinci_model_->GetRegisterStub(op_desc_->GetName(), session_graph_model_id);
rtError_t rt_ret = rtGetFunctionByName(bin_file_key, &stub_func_);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetFunctionByName fail for op:%s(%s), "
"bin_file_key:%s, ret:0x%X, when KernelTaskInfo %s",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(),
bin_file_key, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "execute rtGetFunctionByName failed. bin_file_key: %s", bin_file_key);
return RT_ERROR_TO_GE_STATUS(rt_ret););
}

if (context.origin_op_index_size() > CC_FUSION_OP_MAX) {
REPORT_INNER_ERROR("E19999", "context.origin_op_index_size():%d is more than CC_FUSION_OP_MAX(%d), op:%s(%s) ,"
"check invalid when KernelTaskInfo %s", context.origin_op_index_size(), CC_FUSION_OP_MAX,
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(), __FUNCTION__);
GELOGE(PARAM_INVALID, "context.origin_op_index_size() is more than CC_FUSION_OP_MAX(%d)", CC_FUSION_OP_MAX);
return PARAM_INVALID;
}
@@ -120,6 +132,9 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
ctx_.opIndex = context.op_index();
uint16_t *args_offset_tmp = reinterpret_cast<uint16_t *>(const_cast<char *>(context.args_offset().data()));
if (context.args_offset().size() / sizeof(uint16_t) < 1) {
REPORT_INNER_ERROR("E19999", "context.args_offset().size():%zu / sizeof(uint16_t) less than 1, op:%s(%s) ,"
"check invalid when KernelTaskInfo %s", context.args_offset().size(),
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "context.args_offset().size() / sizeof(uint16_t) less than 1");
return FAILED;
}
@@ -132,6 +147,8 @@ Status KernelTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci
ret = InitAicpuTask(context.op_index(), kernel_def);
} else {
if (kernel_def.args().empty() || args_size_ == 0) {
REPORT_INNER_ERROR("E19999", "kernel_def.args() is empty, op:%s(%s), check invalid when KernelTaskInfo %s",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "args is null.");
return FAILED;
}
@@ -164,6 +181,8 @@ void KernelTaskInfo::UpdateSKTTaskId() {
if (davinci_model_ != nullptr) {
rtError_t rt_ret = rtModelGetTaskId(davinci_model_->GetRtModelHandle(), &task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId fail, ret:0x%X, when KernelTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return;
}
@@ -182,6 +201,8 @@ void KernelTaskInfo::UpdateTaskId() {
if (davinci_model_ != nullptr) {
rtError_t rt_ret = rtModelGetTaskId(davinci_model_->GetRtModelHandle(), &task_id, &stream_id);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtModelGetTaskId fail, ret:0x%X, when KernelTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return;
}
@@ -237,6 +258,8 @@ Status KernelTaskInfo::SuperKernelLaunch() {
static_cast<rtSmDesc_t *>(skt_info.last_sm_desc), skt_info.last_stream,
skt_info.last_dump_flag);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag fail, ret:0x%X, when KernelTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "SuperKernelLaunch: Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -249,6 +272,8 @@ Status KernelTaskInfo::SuperKernelLaunch() {
// Init super kernel factory
Status ge_ret = factory->Init();
if (ge_ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call SuperKernelFactory init fail, ret:0x%X, when KernelTaskInfo %s",
ge_ret, __FUNCTION__);
GELOGE(ge_ret, "SuperKernelLaunch: SuperKernelFactory init failed");
return ge_ret;
}
@@ -256,6 +281,8 @@ Status KernelTaskInfo::SuperKernelLaunch() {
std::unique_ptr<skt::SuperKernel> superKernel = nullptr;
ge_ret = factory->FuseKernels(skt_kernel_list, skt_arg_list, skt_info.last_block_dim, superKernel);
if (ge_ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call SuperKernelFactory FuseKernels fail, ret:0x%X, when KernelTaskInfo %s",
ge_ret, __FUNCTION__);
GELOGE(ge_ret, "SuperKernelLaunch: fuse call failed");
return ge_ret;
}
@@ -263,6 +290,8 @@ Status KernelTaskInfo::SuperKernelLaunch() {
skt_dump_flag_ = GetDumpFlag();
ge_ret = superKernel->Launch(skt_info.last_stream, skt_dump_flag_);
if (ge_ret != SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call SuperKernelFactory Launch fail, ret:0x%X, when KernelTaskInfo %s",
ge_ret, __FUNCTION__);
GELOGE(ge_ret, "SuperKernelLaunch: launch failed");
return ge_ret;
}
@@ -301,11 +330,14 @@ Status KernelTaskInfo::SaveSuperKernelInfo() {

bool KernelTaskInfo::IsMarkedLastNode() {
if (davinci_model_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when KernelTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model is null!");
return false;
}
OpDescPtr op_desc = davinci_model_->GetOpByIndex(ctx_.opIndex);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u when KernelTaskInfo %s",
ctx_.opIndex, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "InitTVMTaskInfo error, index is out of range!");
return false;
}
@@ -316,11 +348,14 @@ bool KernelTaskInfo::IsMarkedLastNode() {

bool KernelTaskInfo::IsMarkedFirstNode() {
if (davinci_model_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when KernelTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model is null!");
return false;
}
OpDescPtr op_desc = davinci_model_->GetOpByIndex(ctx_.opIndex);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u when KernelTaskInfo %s",
ctx_.opIndex, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "InitTVMTaskInfo error, index is out of range!");
return false;
}
@@ -361,6 +396,8 @@ Status KernelTaskInfo::SuperKernelDistribute() {
rtError_t rt_ret = rtKernelLaunchWithFlag(stub_func_, block_dim_, args_, args_size_,
static_cast<rtSmDesc_t *>(sm_desc_), stream_, dump_flag_);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag fail, ret:0x%X, when KernelTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return rt_ret;
}
@@ -425,6 +462,8 @@ Status KernelTaskInfo::Distribute() {
}
}
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag or rtCpuKernelLaunchWithFlag fail, "
"ret:0x%X, when KernelTaskInfo %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -459,6 +498,8 @@ Status KernelTaskInfo::CopyNoncontinuousArgs(uint16_t offset) {
// copy io addr
errno_t sec_ret = memcpy_s(args_addr.get() + offset, addr_size, io_addrs_.data(), addr_size);
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
addr_size, sec_ret, __FUNCTION__);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
return FAILED;
}
@@ -466,6 +507,8 @@ Status KernelTaskInfo::CopyNoncontinuousArgs(uint16_t offset) {
// copy args to device
rtError_t rt_ret = rtMemcpy(args_, args_size_, args_addr.get(), args_size_, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy, size:%u, ret:0x%X, when KernelTaskInfo %s",
args_size_, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -514,6 +557,7 @@ Status KernelTaskInfo::Release() {

ret = (sm_desc_ != nullptr) ? rtMemFreeManaged(sm_desc_) : RT_ERROR_NONE;
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemFreeManaged fail, ret:0x%X, when KernelTaskInfo %s", ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", static_cast<int>(ret));
return RT_ERROR_TO_GE_STATUS(ret);
}
@@ -544,12 +588,16 @@ Status KernelTaskInfo::UpdateL2Data(const domi::KernelDef &kernel_def) {

rtError_t rt_ret = rtMemAllocManaged(&sm_desc_, sm_desc.size(), RT_MEMORY_SPM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemAllocManaged fail, ret:0x%X, when KernelTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtMemcpy(sm_desc_, sm_desc.size(), sm_desc.data(), sm_desc.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
sm_desc.size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -596,6 +644,8 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne
args_addr = std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[args_size_]);
errno_t sec_ret = memcpy_s(args_addr.get(), args_size_, kernel_def.args().data(), args_size_);
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%u, ret:0x%X, when KernelTaskInfo %s",
args_size_, sec_ret, __FUNCTION__);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
return FAILED;
}
@@ -638,6 +688,8 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne
// malloc args memory
rt_ret = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%u, ret:0x%X, when KernelTaskInfo %s",
args_size_, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -645,11 +697,17 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne
// copy orign args
rt_ret = rtMemcpy(args_, args_size_, kernel_def.args().data(), args_size_, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%u, ret:0x%X, when KernelTaskInfo %s",
args_size_, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

if ((args_size_ <= offset) || (args_size_ - offset < kAddrLen * tensor_device_addrs.size())) {
REPORT_INNER_ERROR("E19999", "offset:%u >= kernelInfo.argsSize:%u or copy content:%zu beyond applied memory:%u, "
"check invalid, when KernelTaskInfo %s",
offset, args_size_, kAddrLen * tensor_device_addrs.size(), args_size_ - offset,
__FUNCTION__);
GELOGE(FAILED, "offset >= kernelInfo.argsSize or copy content beyond applied memory.");
return FAILED;
}
@@ -658,12 +716,16 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne
rt_ret = rtMemcpy(static_cast<char *>(args_) + offset, args_size_ - offset, tensor_device_addrs.data(),
kAddrLen * tensor_device_addrs.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%u, ret:0x%X, when KernelTaskInfo %s",
args_size_ - offset, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
sec_ret = memcpy_s(args_addr.get() + offset, args_size_ - offset, tensor_device_addrs.data(),
kAddrLen * tensor_device_addrs.size());
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%u, ret:0x%X, when KernelTaskInfo %s",
args_size_ - offset, sec_ret, __FUNCTION__);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
return FAILED;
}
@@ -708,6 +770,8 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
GELOGI("Do InitAICPUCustomTask");
OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u when KernelTaskInfo %s",
op_index, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "index is out of range, index: %u", op_index);
return INTERNAL_ERROR;
}
@@ -718,11 +782,17 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
const uint32_t kCustomAicpuArgsLen = 5;
ctx_.argsOffset = new (std::nothrow) uint16_t[kCustomAicpuArgsLen]();
if (ctx_.argsOffset == nullptr) {
REPORT_INNER_ERROR("E19999", "New ctx_.argsOffset fail, size:%u, op:%s(%s), when KernelTaskInfo %s",
kCustomAicpuArgsLen, op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(PARAM_INVALID, "ctx_.argsOffset is null!");
return PARAM_INVALID;
}

if (context.args_offset().size() / sizeof(uint16_t) < kCustomAicpuArgsLen) {
REPORT_INNER_ERROR("E19999", "context.args_offset().size():%zu / sizeof(uint16_t) is less than "
"kCustomAicpuArgsLen:%u, op:%s(%s), check invalid when KernelTaskInfo %s",
context.args_offset().size(), kCustomAicpuArgsLen,
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(PARAM_INVALID, "context.args_offset().size() / sizeof(uint16_t) is less than kCustomAicpuArgsLen");
return PARAM_INVALID;
}
@@ -743,24 +813,32 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
// attrHandle
Buffer buffer;
if (!AttrUtils::GetBytes(op_desc, ATTR_NAME_OPATTR, buffer)) {
REPORT_INNER_ERROR("E19999", "Get Attr:%s in op:%s(%s) fail when KernelTaskInfo %s", ATTR_NAME_OPATTR.c_str(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "can't find opattr bytes!.");
return FAILED;
}

uint32_t op_attr_size = buffer.GetSize();
if (op_attr_size == 0) {
REPORT_INNER_ERROR("E19999", "Attr:%s in op:%s(%s) size is 0, check invalid when KernelTaskInfo %s",
ATTR_NAME_OPATTR.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(PARAM_INVALID, "param op_attr_size is out of range");
return PARAM_INVALID;
}

rtError_t rt_ret = rtMalloc(&custom_info_.attr_handle, op_attr_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail for op:%s(%s), size:%u, ret:0x%X, when KernelTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_attr_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtMemcpy(custom_info_.attr_handle, op_attr_size, buffer.GetData(), op_attr_size, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail for op:%s(%s), size:%u, ret:0x%X, when KernelTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), op_attr_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -770,6 +848,10 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel

for (uint32_t i = 0; i < kCustomAicpuArgsLen; ++i) {
if (kernel_def.args().size() < ((size_t)ctx_.argsOffset[i] + sizeof(uint64_t))) {
REPORT_INNER_ERROR("E19999", "ctx.argsOffset[%u]: %u + sizeof(uint64_t): %zu >= kernelDef.args().size():%zu, "
"op:%s(%s) check invalid when KernelTaskInfo %s", i, (uint32_t)ctx_.argsOffset[i],
sizeof(uint64_t), kernel_def.args().size(),
op_desc->GetName().c_str(), op_desc->GetType().c_str(), __FUNCTION__);
GELOGE(FAILED, "ctx.argsOffset[%u]: %u + sizeof(uint64_t): %zu >= kernelDef.args().size():%zu", i,
(uint32_t)ctx_.argsOffset[i], sizeof(uint64_t), kernel_def.args().size());
return FAILED;
@@ -788,6 +870,8 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel

rt_ret = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail for op:%s(%s), size:%u, ret:0x%X, when KernelTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -795,6 +879,9 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
rt_ret = rtMemcpy(args_, kernel_def.args_size(), kernel_def.args().data(), kernel_def.args_size(),
RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail for op:%s(%s), size:%u, ret:0x%X, when KernelTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
kernel_def.args_size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -809,6 +896,7 @@ Status KernelTaskInfo::InitAICPUCustomTask(uint32_t op_index, const domi::Kernel
Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
GELOGI("Do InitCCETask");
if (davinci_model_ == nullptr) {
REPORT_INNER_ERROR("E19999", "Check param davinci_model nullptr when KernelTaskInfo %s", __FUNCTION__);
GELOGE(PARAM_INVALID, "davinci_model is null!");
return PARAM_INVALID;
}
@@ -823,6 +911,8 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {

if (context.is_flowtable()) {
if (flowtable.empty()) {
REPORT_INNER_ERROR("E19999", "kernel_def.flowtable is empty, check invalid when KernelTaskInfo %s",
__FUNCTION__);
GELOGE(FAILED, "flowtable is null.");
return FAILED;
}
@@ -857,6 +947,8 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
// args
rtError_t rt_ret = rtMalloc(&args_, kernel_def.args_size(), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%u, ret:0x%X, when KernelTaskInfo %s",
kernel_def.args_size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -865,6 +957,8 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
rt_ret = rtMemcpy(args_, kernel_def.args_size(), kernel_def.args().data(), kernel_def.args_size(),
RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%u, ret:0x%X, when KernelTaskInfo %s",
kernel_def.args_size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -873,12 +967,16 @@ Status KernelTaskInfo::InitCceTask(const domi::KernelDef &kernel_def) {
if (!sm_desc.empty()) {
rt_ret = rtMemAllocManaged(&sm_desc_, sm_desc.size(), RT_MEMORY_SPM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemAllocManaged fail, ret:0x%X, when KernelTaskInfo %s",
rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

rt_ret = rtMemcpy(sm_desc_, sm_desc.size(), sm_desc.data(), sm_desc.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
sm_desc.size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -893,6 +991,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k

OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index);
if (op_desc == nullptr) {
REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u when KernelTaskInfo %s",
op_index, __FUNCTION__);
GELOGE(INTERNAL_ERROR, "index is out of range, index: %u", op_index);
return INTERNAL_ERROR;
}
@@ -910,6 +1010,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
GE_PRINT_DYNAMIC_MEMORY(new, "cce task physical memory.", sizeof(uint8_t) * args_size_)
errno_t sec_ret = memcpy_s(args_addr.get(), args_size_, kernel_def.args().data(), args_size_);
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%u, ret:0x%X, when KernelTaskInfo %s",
args_size_, sec_ret, __FUNCTION__);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
return FAILED;
}
@@ -944,6 +1046,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
auto addrs_size = sizeof(uint64_t) * io_addrs.size();
sec_ret = memcpy_s(reinterpret_cast<void *>(io_addr), addrs_size, io_addrs.data(), addrs_size);
if (sec_ret != EOK) {
REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%lu, ret:0x%X, when KernelTaskInfo %s",
addrs_size, sec_ret, __FUNCTION__);
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
return FAILED;
}
@@ -952,6 +1056,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
// malloc device memory for args
rtError_t rt_ret = rtMalloc(static_cast<void **>(&args_), args_size_, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail for op:%s(%s), size:%u, ret:0x%X, when KernelTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api(rtMalloc) failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -960,6 +1066,8 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
// copy args to device
rt_ret = rtMemcpy(args_, args_size_, args_addr.get(), args_size_, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail for op:%s(%s), size:%u, ret:0x%X, when KernelTaskInfo %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), args_size_, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1032,12 +1140,18 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) {
}
auto rt_ret = rtMalloc(&aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail for op:%s(%s), size:%zu, ret:0x%X, when KernelTaskInfo %s",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(),
ext_handle->GetExtInfoLen(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtMalloc ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
rt_ret = rtMemcpy(aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), ext_handle->GetExtInfo(),
ext_handle->GetExtInfoLen(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail for op:%s(%s), size:%zu, ret:0x%X, when KernelTaskInfo %s",
op_desc_->GetName().c_str(), op_desc_->GetType().c_str(),
ext_handle->GetExtInfoLen(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtMemcpy ext_info error: 0x%X, size=%zu", rt_ret, ext_info.size());
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1055,6 +1169,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
// inputDescs
rtError_t rt_ret = rtMalloc(&custom_info_.input_descs, sizeof(opTensor_t) * input_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
sizeof(opTensor_t) * input_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1063,6 +1179,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
rt_ret = rtMemcpy(static_cast<opTensor_t *>(custom_info_.input_descs) + i, sizeof(opTensor_t),
const_cast<tagOpTensor *>(&input_descs[i]), sizeof(opTensor_t), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
sizeof(opTensor_t), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1071,6 +1189,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
// inputAddrs
rt_ret = rtMalloc(&custom_info_.input_addrs, sizeof(opTensor_t) * input_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
sizeof(opTensor_t) * input_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1079,6 +1199,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
rt_ret = rtMemcpy(custom_info_.input_addrs, kAddrLen * input_size, &input_data_addrs[0], kAddrLen * input_size,
RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
kAddrLen * input_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1087,6 +1209,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
// outputDescs
rt_ret = rtMalloc(&custom_info_.output_descs, sizeof(opTensor_t) * output_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
sizeof(opTensor_t) * output_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1094,6 +1218,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
rt_ret = rtMemcpy(static_cast<opTensor_t *>(custom_info_.output_descs) + i, sizeof(opTensor_t),
const_cast<tagOpTensor *>(&input_descs[i]), sizeof(opTensor_t), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
sizeof(opTensor_t), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1102,6 +1228,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
// outputAddrs
rt_ret = rtMalloc(&custom_info_.output_addrs, sizeof(opTensor_t) * output_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
sizeof(opTensor_t) * output_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1110,6 +1238,8 @@ Status KernelTaskInfo::StoreInputOutputTensor(const std::vector<void *> &input_d
rt_ret = rtMemcpy(custom_info_.output_addrs, kAddrLen * output_size, &output_data_addrs[0], kAddrLen * output_size,
RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
kAddrLen * output_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1126,11 +1256,16 @@ Status KernelTaskInfo::SetContext(const domi::KernelDef &kernel_def) {
ctx_.isFlowtable = context.is_flowtable();
ctx_.argsCount = context.args_count();
if (ctx_.argsCount == 0) {
REPORT_INNER_ERROR("E19999", "kernel_def.context.args_count is 0, check invalid when KernelTaskInfo %s",
__FUNCTION__);
GELOGE(INTERNAL_ERROR, "check argsCount fail:%u.", ctx_.argsCount);
return INTERNAL_ERROR;
}

if (context.args_offset().size() / sizeof(uint16_t) < ctx_.argsCount) {
REPORT_INNER_ERROR("E19999", "param [context.args_offset().size():%zu / sizeof(uint16_t)] "
"is less than [ctx_.argsCount:%u], check invalid when KernelTaskInfo %s",
context.args_offset().size(), ctx_.argsCount, __FUNCTION__);
GELOGE(PARAM_INVALID, "param [context.args_offset().size() / sizeof(uint16_t)] is less than [ctx_.argsCount]");
return PARAM_INVALID;
}
@@ -1138,6 +1273,8 @@ Status KernelTaskInfo::SetContext(const domi::KernelDef &kernel_def) {
// ctx_.argsOffset stores the offset of the internal information of agrs_, equal to the ctx_.argsCount
ctx_.argsOffset = new (std::nothrow) uint16_t[ctx_.argsCount]();
if (ctx_.argsOffset == nullptr) {
REPORT_INNER_ERROR("E19999", "New ctx_.argsOffset fail, size:%u, when KernelTaskInfo %s",
ctx_.argsCount, __FUNCTION__);
GELOGE(PARAM_INVALID, "(param [ctx_.argsOffset] must not be null.");
return PARAM_INVALID;
}
@@ -1155,6 +1292,7 @@ void KernelTaskInfo::FreeRtMem(void **ptr) {
}
rtError_t ret = rtFree(*ptr);
if (ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtFree fail, ret:0x%X, when KernelTaskInfo %s", ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", ret);
}

@@ -1202,6 +1340,8 @@ Status KernelTaskInfo::CceUpdateKernelArgs(const domi::KernelContext &context, u
if (handle == nullptr) {
error = mmDlerror();
GE_IF_BOOL_EXEC(error == nullptr, error = "");
REPORT_INNER_ERROR("E19999", "Failed in dlopen:%s, dlerror:%s, when KernelTaskInfo %s",
canonicalPath.c_str(), error, __FUNCTION__);
GELOGE(GE_PLGMGR_SO_NOT_EXIST, "Failed in dlopen %s! ", error);
return FAILED;
}
@@ -1210,6 +1350,8 @@ Status KernelTaskInfo::CceUpdateKernelArgs(const domi::KernelContext &context, u
auto cceUpdateKernelArgs = (ccStatus_t(*)(ccOpContext &, uint64_t, uint64_t,
uint64_t, void *, uint64_t, void *))mmDlsym(handle, const_cast<char *>(update_kernel_args.c_str()));
if (cceUpdateKernelArgs == nullptr) {
REPORT_INNER_ERROR("E19999", "No symbol:%s in %s, check invalid when KernelTaskInfo %s",
update_kernel_args.c_str(), canonicalPath.c_str(), __FUNCTION__);
GELOGE(FAILED, "Failed to invoke function ccUpdateKernelArgs");
if (mmDlclose(handle) != 0) {
error = mmDlerror();
@@ -1234,6 +1376,8 @@ Status KernelTaskInfo::CceUpdateKernelArgs(const domi::KernelContext &context, u
return FAILED;
}
if (cc_ret != CC_STATUS_SUCCESS) {
REPORT_CALL_ERROR("E19999", "Call cceUpdateKernelArgs fail, ret:0x%X, when KernelTaskInfo %s",
cc_ret, __FUNCTION__);
GELOGE(CCE_FAILED, "Call cce api failed, ret: 0x%X", cc_ret);
return CCE_FAILED;
}
@@ -1247,6 +1391,8 @@ Status KernelTaskInfo::SetFlowtable(std::string &flowtable, const domi::KernelDe
if (context.is_flowtable()) {
rtError_t rt_ret = rtMalloc(&flowtable_, flowtable.size(), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
flowtable.size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1254,6 +1400,8 @@ Status KernelTaskInfo::SetFlowtable(std::string &flowtable, const domi::KernelDe

rt_ret = rtMemcpy(flowtable_, flowtable.size(), flowtable.data(), flowtable.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%zu, ret:0x%X, when KernelTaskInfo %s",
flowtable.size(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
@@ -1263,6 +1411,11 @@ Status KernelTaskInfo::SetFlowtable(std::string &flowtable, const domi::KernelDe

if (kernel_def.args().size() <
((reinterpret_cast<uint16_t *>(const_cast<char *>(context.args_offset().data())))[0] + sizeof(uint64_t))) {
REPORT_INNER_ERROR(
"E19999", "(context.args_offset().data()))[0]:%u + sizeof(uint64_t):%zu > "
"kernelDef.args().size():%zu, check invalid when %s",
(uint32_t)((reinterpret_cast<uint16_t *>(const_cast<char *>(context.args_offset().data())))[0]),
sizeof(uint64_t), kernel_def.args().size(), __FUNCTION__);
GELOGE(FAILED, "(context.args_offset().data()))[0]:%u + sizeof(uint64_t):%zu > kernelDef.args().size():%zu",
(uint32_t)((reinterpret_cast<uint16_t *>(const_cast<char *>(context.args_offset().data())))[0]),
sizeof(uint64_t), kernel_def.args().size());
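The KernelTaskInfo hunks above all apply one pattern: every runtime or libc call that can fail now emits a structured REPORT_CALL_ERROR / REPORT_INNER_ERROR record (error code E19999) before the pre-existing GELOGE log and early return. A minimal sketch of that guard, reusing the rtMalloc case from this diff (the macro signatures are taken from the usages above, not verified against the headers):

// Sketch only: mirrors the error-reporting pattern added throughout kernel_task_info.cc.
rtError_t rt_ret = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
  // New: structured error record for external diagnosis tooling.
  REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%u, ret:0x%X, when KernelTaskInfo %s",
                    args_size_, rt_ret, __FUNCTION__);
  // Unchanged: log and convert the runtime error into a GE status.
  GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
  return RT_ERROR_TO_GE_STATUS(rt_ret);
}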


+ 12  - 3   ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc

@@ -26,15 +26,24 @@ Status SuperKernel::Launch(rtStream_t stream, uint32_t dump_flag) {
reinterpret_cast<const void *>(static_cast<uintptr_t>(this->GetNavTableSize()))};

rtError_t rt_ret = rtMalloc(reinterpret_cast<void **>(&device_args_addr_), sizeof(args), RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMalloc failied. error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%lu, ret:0x%X when %s",
sizeof(args), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtMalloc failied. error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtMemcpy(reinterpret_cast<void *>(device_args_addr_), sizeof(args), reinterpret_cast<void *>(args),
sizeof(args), RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy failied. error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%lu, ret:0x%X when %s",
sizeof(args), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtMemcpy failied. error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtKernelLaunchWithFlag((void *const)func_stub_, block_dim_, device_args_addr_, sizeof(args), NULL, stream,
dump_flag);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtKernelLaunchWithFlag failied. error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchWithFlag fail, dump_flag:%u, ret:0x%X when %s",
dump_flag, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtKernelLaunchWithFlag failied. error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
return SUCCESS;
}


+ 19  - 9   ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc

@@ -35,14 +35,16 @@ Status SuperKernelFactory::Init() {
}
rtError_t rt_ret;
rt_ret = rtGetFunctionByName(this->sk_stub_name_.c_str(), &this->func_stub_);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED,
"rtGetFunctionByName "
"failed. stub_func: %s, please export LD_LIBRARY_PATH for "
"libcce_aicore.so",
this->sk_stub_name_.c_str());
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetFunctionByName fail, stub_func:%s, ret:0x%X, when %s",
this->sk_stub_name_.c_str(), rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtGetFunctionByName failed. stub_func: %s, please export LD_LIBRARY_PATH for "
"libcce_aicore.so", this->sk_stub_name_.c_str());
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtGetAddrByFun(this->func_stub_, &this->func_ptr_);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtGetAddrByFun failed. error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetAddrByFun fail, ret:0x%X, when %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtGetAddrByFun failed. error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
GELOGD(
"SKT: fuseKernels super_kernel_template subFunc %p, device func "
@@ -98,7 +100,9 @@ Status SuperKernelFactory::FuseKernels(const std::vector<void *> &stub_func_list
for (unsigned i = 0; i < stub_func_list.size(); i++) {
void *sub_device_func = nullptr;
rt_ret = rtGetAddrByFun(stub_func_list[i], &sub_device_func);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtGetAddrByFun failed. error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtGetAddrByFun fail, ret:0x%X, when %s", rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtGetAddrByFun failed. error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
GELOGD("SKT: fuseKernels subFunc %p, device func address %p", stub_func_list[i], sub_device_func);
// store two uint64_t address
@@ -109,11 +113,17 @@ Status SuperKernelFactory::FuseKernels(const std::vector<void *> &stub_func_list
GELOGD("SKT: fuseKernels args base address %lu", nav_table[i * kFusedKernelSizeUnit + 1]);
}
rt_ret = rtMalloc(reinterpret_cast<void **>(&hbm_nav_table_addr), nav_table_size, RT_MEMORY_HBM);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMalloc failed. error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMalloc fail, size:%lu, ret:0x%X, when %s",
nav_table_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtMalloc failed. error: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);)
rt_ret = rtMemcpy(reinterpret_cast<void *>(hbm_nav_table_addr), nav_table_size,
reinterpret_cast<void *>(nav_table.get()), nav_table_size, RT_MEMCPY_HOST_TO_DEVICE);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy failed. error: 0x%X", rt_ret);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE,
REPORT_CALL_ERROR("E19999", "Call rtMemcpy fail, size:%lu, ret:0x%X when %s",
nav_table_size, rt_ret, __FUNCTION__);
GELOGE(RT_FAILED, "rtMemcpy failed. error: 0x%X", rt_ret);
GE_CHK_RT(rtFree(hbm_nav_table_addr)); return RT_ERROR_TO_GE_STATUS(rt_ret);)
// Create the necessary metadata for the super kernel
h =


+ 1  - 1   ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h

@@ -28,7 +28,7 @@ class SuperKernelFactory {
void *func_stub_ = nullptr;
void *func_ptr_ = nullptr;
void *handle_ = nullptr;
std::string sk_stub_name_ = "_Z21super_kernel_templatePmm";
std::string sk_stub_name_ = "super_kernel_template";
bool is_init_ = false;
SuperKernelFactory() {};
~SuperKernelFactory() {
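The only functional change in this header is the stub symbol: the factory now looks up the plain name "super_kernel_template" instead of the C++-mangled "_Z21super_kernel_templatePmm"; the lookup path in SuperKernelFactory::Init() shown above is untouched. A hedged sketch of how that string is consumed (whether the runtime expects the mangled or the plain symbol is an inference from this rename, not confirmed here):

// Sketch: resolving the fused-kernel stub by its (now unmangled) name, as in Init() above.
void *func_stub = nullptr;
rtError_t rt_ret = rtGetFunctionByName("super_kernel_template", &func_stub);
if (rt_ret != RT_ERROR_NONE) {
  GELOGE(RT_FAILED, "rtGetFunctionByName failed. stub_func: super_kernel_template");
}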


+ 3  - 3   ge/graph/load/model_manager/zero_copy_offset.h

@@ -58,15 +58,15 @@ class ZeroCopyOffset {
uint32_t GetDataCount() const { return data_count_; }
uint32_t GetAddrCount() const { return addr_count_; }
// value of *data_info_ from davinci_model
std::vector<std::pair<int64_t, void *>> GetDataInfo() const { return data_info_; }
const std::vector<std::pair<int64_t, void *>> &GetDataInfo() const { return data_info_; }
// relative_offset from zero_copy_relative_offset_
std::vector<int64_t> GetRelativeOffset() const { return relative_offset_; }
const std::vector<int64_t> &GetRelativeOffset() const { return relative_offset_; }
// data_size of Data/Netoutput
int64_t GetDataSize() const { return data_size_; }
// value of *outside_addrs_ from davinci_model
const std::vector<std::map<const void *, std::vector<void *>>> &GetOutsideAddrs() const { return outside_addrs_; }
// name of op
std::string GetOpName() const { return op_name_; }
const std::string &GetOpName() const { return op_name_; }
const bool IsRelativeOffsetValid() const { return valid_relative_offset_; }

private:
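These accessors previously returned by value, copying the underlying vector or string on every call; they now hand out const references. Before and after, condensed from the header above:

// Before: each call copies the whole vector.
std::vector<int64_t> GetRelativeOffset() const { return relative_offset_; }
// After: callers get a read-only, copy-free view; it stays valid only while the
// owning ZeroCopyOffset instance is alive.
const std::vector<int64_t> &GetRelativeOffset() const { return relative_offset_; }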


+ 6  - 6   ge/graph/passes/atomic_addr_clean_pass.cc

@@ -126,11 +126,11 @@ bool AtomicAddrCleanPass::IsOutputIndexPeerInputAtomic(const NodePtr &node, int6

bool AtomicAddrCleanPass::CheckSkipInsertInLoopGraph(const NodePtr &node) {
OpDescPtr op_desc = node->GetOpDesc();
std::map<string, std::map<int, int>> node_workspace_offset;
std::map<string, std::map<int64_t, int64_t>> atomic_workspace_index_size;
bool has_atomic_input = op_desc->HasAttr(ATOMIC_ATTR_INPUT_INDEX);
bool has_atomic_output = op_desc->HasAttr(ATOMIC_ATTR_OUTPUT_INDEX);
node_workspace_offset = op_desc->TryGetExtAttr(EXT_ATTR_ATOMIC_WORKSPACE_OFFSET, node_workspace_offset);
if (!has_atomic_input && has_atomic_output && node_workspace_offset.empty()) {
atomic_workspace_index_size = op_desc->TryGetExtAttr(EXT_ATTR_ATOMIC_WORKSPACE_INFO, atomic_workspace_index_size);
if (!has_atomic_input && has_atomic_output && atomic_workspace_index_size.empty()) {
std::vector<int64_t> atomic_output_index;
(void) ge::AttrUtils::GetListInt(op_desc, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_index);
bool is_all_output_peer_also_atomic = true;
@@ -332,11 +332,11 @@ bool AtomicAddrCleanPass::IsAtomicOp(const NodePtr &node) {
}

// 2.Check atomic attr in node
std::map<string, std::map<int, int>> node_workspace_offset;
std::map<string, std::map<int64_t, int64_t>> atomic_workspace_index_size;
bool has_atomic_input = op_desc->HasAttr(ATOMIC_ATTR_INPUT_INDEX);
bool has_atomic_output = op_desc->HasAttr(ATOMIC_ATTR_OUTPUT_INDEX);
node_workspace_offset = op_desc->TryGetExtAttr(EXT_ATTR_ATOMIC_WORKSPACE_OFFSET, node_workspace_offset);
if (!has_atomic_input && !has_atomic_output && node_workspace_offset.empty()) {
atomic_workspace_index_size = op_desc->TryGetExtAttr(EXT_ATTR_ATOMIC_WORKSPACE_INFO, atomic_workspace_index_size);
if (!has_atomic_input && !has_atomic_output && atomic_workspace_index_size.empty()) {
return false;
}



+ 0  - 1   ge/graph/passes/attach_stream_label_pass.cc

@@ -137,7 +137,6 @@ Status AttachStreamLabelPass::AttachFlag(const NodePtr &node, std::string &strea
return INTERNAL_ERROR;
}
stream_label = node->GetInDataNodes().at(0)->GetName();
GE_CHK_STATUS_RET(SetStreamLabel(node, stream_label), "Set stream label failed.");
bool value = false;
OpDescPtr op_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(op_desc);


+ 7  - 1   ge/graph/passes/pass_utils.cc

@@ -35,9 +35,9 @@
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/type_utils.h"
#include "utils/node_utils.h"

namespace ge {

Status PassUtils::ConstructTensorDescWithData(const GeTensorDesc &out_desc, std::vector<int64_t> &data,
std::vector<GeTensorPtr> &v_output, const bool scalar_output) {
Status ret = SUCCESS;
@@ -246,6 +246,12 @@ NodePtr PassUtils::GetInDataNode(const ConstNodePtr &node, int index) {
return src_node;
}

NodePtr PassUtils::GetInNodeCrossSubgraphByIndex(const ConstNodePtr &node, int index) {
auto src_node = GetInDataNode(node, index);

return NodeUtils::GetInNodeCrossSubgraph(src_node);
}

bool PassUtils::IsNeedTrainIteFlowCtrl(const ComputeGraphPtr &compute_graph) {
if (compute_graph == nullptr) {
return false;


+ 2  - 0   ge/graph/passes/pass_utils.h

@@ -30,6 +30,8 @@ class PassUtils {

static NodePtr GetInDataNode(const ConstNodePtr &node, int index);

static NodePtr GetInNodeCrossSubgraphByIndex(const ConstNodePtr &node, int index);

static bool IsConstant(const ConstNodePtr &node);

static Status SetOutNodeWeight(const OutDataAnchorPtr &out_data_anchor, const NodePtr &src_node);


+ 1  - 1   ge/graph/passes/subexpression_migration_pass.cc

@@ -279,7 +279,7 @@ Status SubexpressionMigrationPass::GraphNodeMigration(const ComputeGraphPtr &gra
const auto &in_anchor = in_anchors.at(i);
const auto &base_node = in_anchor->GetOwnerNode();
GELOGD("Get Data direct node: %s", base_node->GetName().c_str());
if (!base_node->GetHostNode()) {
if (!base_node->GetHostNode() || base_node->GetType() == SWITCH) {
continue;
}



+ 8  - 2   ge/graph/passes/switch_dead_branch_elimination.cc

@@ -94,6 +94,12 @@ Status SwitchDeadBranchElimination::DeleteSwitchNode(NodePtr &node, NodePtr &pre
GELOGE(FAILED, "parameter is null.");
return FAILED;
}

// If two nodes aren't in same graph, get node's direct in_node instead of pred_node.
if (node->GetOwnerComputeGraph() != pred_node->GetOwnerComputeGraph()) {
pred_node = PassUtils::GetInDataNode(node, kPredInputIndex);
}

// link pred's in control nodes to switch
if (GraphUtils::CopyInCtrlEdges(pred_node, node) != GRAPH_SUCCESS) {
return FAILED;
@@ -131,7 +137,7 @@ Status SwitchDeadBranchElimination::Run(NodePtr &node) {
return SUCCESS;
}

auto pred_node = PassUtils::GetInDataNode(node, kPredInputIndex);
auto pred_node = PassUtils::GetInNodeCrossSubgraphByIndex(node, kPredInputIndex);
if (pred_node == nullptr) {
GELOGD("[%s] Pred input is null.", node->GetName().c_str());
return SUCCESS;
@@ -143,7 +149,7 @@ Status SwitchDeadBranchElimination::Run(NodePtr &node) {
return SUCCESS;
}

auto input_node = PassUtils::GetInDataNode(node, kDataInputIndex);
auto input_node = PassUtils::GetInNodeCrossSubgraphByIndex(node, kDataInputIndex);
if (input_node == nullptr) {
GELOGD("[%s] Data input is null.", node->GetName().c_str());
return SUCCESS;
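The pass now resolves the Switch inputs through the new PassUtils::GetInNodeCrossSubgraphByIndex helper added in pass_utils.cc above, so a producer that sits in a parent graph is still found, and DeleteSwitchNode falls back to the direct in-node when pred and Switch live in different graphs. A condensed usage sketch with the pass's own constants:

// Sketch: look up Switch inputs across subgraph boundaries before dead-branch folding.
auto pred_node = PassUtils::GetInNodeCrossSubgraphByIndex(node, kPredInputIndex);
auto input_node = PassUtils::GetInNodeCrossSubgraphByIndex(node, kDataInputIndex);
if (pred_node == nullptr || input_node == nullptr) {
  return SUCCESS;  // same early-out as before: nothing to eliminate
}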


+ 2  - 0   ge/graph/passes/switch_to_stream_switch_pass.cc

@@ -455,6 +455,8 @@ Status SwitchToStreamSwitchPass::CombineSwitchNode(const ComputeGraphPtr &graph)

// select first stream_switch
NodePtr stream_switch = switch_list.front();
// set stream_label
GE_CHK_STATUS_RET(SetStreamLabel(stream_switch, cast_node->GetName()), "Set stream label failed.");
OpDescPtr switch_desc = stream_switch->GetOpDesc();
GE_CHECK_NOTNULL(switch_desc);
switch_desc->SetName(CheckDuplicateName(cond_group + "/" + STREAMSWITCH + (true_branch_flag ? "_t" : "_f")));


+ 22  - 10   ge/graph/preprocess/graph_preprocess.cc

@@ -619,19 +619,25 @@ Status ProcessInputDtDynShape(NodePtr &node_ptr, bool &is_dynamic_batch, NodePtr
return SUCCESS;
}
input->SetDataType(dt_set);
int64_t input_shape_size = 0;
int64_t output_shape_size = 0;
ge::graphStatus input_graph_status = ge::TensorUtils::GetTensorSizeInBytes(*input, input_shape_size);
ge::graphStatus output_graph_status = ge::TensorUtils::GetTensorMemorySizeInBytes(*input, output_shape_size);
if (input_graph_status != ge::GRAPH_SUCCESS && output_graph_status != ge::GRAPH_SUCCESS) {
GELOGE(GRAPH_FAILED, "GetTensorSize failed!");
return FAILED;
}
ge::TensorUtils::SetSize(*input, input_shape_size);
const GeTensorDescPtr &output = op_desc->MutableOutputDesc(0);
GE_CHECK_NOTNULL(output);
output->SetDataType(dt_set);
ge::TensorUtils::SetSize(*output, output_shape_size);

GeShape shape = input->GetShape();
if (!shape.IsUnknownShape()) {
int64_t input_shape_size = 0;
int64_t output_shape_size = 0;
ge::graphStatus input_graph_status = ge::TensorUtils::GetTensorSizeInBytes(*input, input_shape_size);
ge::graphStatus output_graph_status = ge::TensorUtils::GetTensorMemorySizeInBytes(*input, output_shape_size);
if (input_graph_status != ge::GRAPH_SUCCESS && output_graph_status != ge::GRAPH_SUCCESS) {
GELOGE(GRAPH_FAILED, "[Process][InputOp] Get tensor size of op [%s] failed!", node_ptr->GetName().c_str());
return FAILED;
}
ge::TensorUtils::SetSize(*input, input_shape_size);
ge::TensorUtils::SetSize(*output, output_shape_size);
GELOGI("[Process][InputDynShape] Set input and output size of node [%s] success.", node_ptr->GetName().c_str());
}

if (is_dynamic_batch) {
GELOGI("The node [%s] dtype set fp16", switchn_node->GetName().c_str());
auto switchn_op_desc = switchn_node->GetOpDesc();
@@ -1255,6 +1261,12 @@ Status GraphPrepare::AdjustDataOpOutput(const NodePtr &node) {
return GE_GRAPH_GRAPH_NODE_NULL;
}
GeTensorDesc output = op_desc_ptr->GetOutputDesc(0);
GeShape output_shape = output.GetShape();
if (output_shape.IsUnknownShape()) {
GELOGD("[Adjust][DataOpOutput] Shape of op [%s] output is unknown.", node->GetName().c_str());
return SUCCESS;
}

int64_t tensor_size = 0;
graphStatus graph_status = TensorUtils::GetTensorMemorySizeInBytes(output, tensor_size);
if (graph_status != GRAPH_SUCCESS) {


+ 13  - 12   ge/hybrid/executor/hybrid_model_async_executor.cc

@@ -297,19 +297,20 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
data_buf.length,
mem_size);

GELOGI("[IMAS]CopyPlainData memcpy graph_%u type[F] output[%zu] memaddr[%p] mem_size[%zu] datasize[%lu]",
model_->root_runtime_param_.graph_id,
input_index,
args.inputs[input_index].GetData(),
mem_size,
data_buf.length);
GE_CHK_RT_RET(rtMemcpy(args.inputs[input_index].MutableData(),
mem_size,
data_buf.data,
data_buf.length,
RT_MEMCPY_HOST_TO_DEVICE));
if (data_buf.length > 0) {
GELOGI("[IMAS]CopyPlainData memcpy graph_%u type[F] output[%zu] memaddr[%p] mem_size[%zu] datasize[%lu]",
model_->root_runtime_param_.graph_id,
input_index,
args.inputs[input_index].GetData(),
mem_size,
data_buf.length);
GE_CHK_RT_RET(rtMemcpy(args.inputs[input_index].MutableData(),
mem_size,
data_buf.data,
data_buf.length,
RT_MEMCPY_HOST_TO_DEVICE));
}
}

return SUCCESS;
}
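The host-to-device copy of input data is now skipped when data_buf.length is zero, so rtMemcpy is never handed an empty (and possibly null) source buffer. The added guard, condensed:

// Sketch of the guard: only issue the H2D copy when there is data to move.
if (data_buf.length > 0) {
  GE_CHK_RT_RET(rtMemcpy(args.inputs[input_index].MutableData(), mem_size,
                         data_buf.data, data_buf.length, RT_MEMCPY_HOST_TO_DEVICE));
}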



+ 159  - 63   ge/hybrid/model/hybrid_model_builder.cc

@@ -255,9 +255,7 @@ Status HybridModelBuilder::GetOrCreateNodeItem(const NodePtr &node, NodeItem **n
(void) AttrUtils::SetBool(new_node->op_desc, kIsFirstNode, false);
(void) AttrUtils::SetBool(new_node->op_desc, kIsLastNode, false);

new_node->node_id = node_index;
new_node->op_desc->SetId(node_index);
node_index += 1;
new_node->node_id = static_cast<int>(new_node->op_desc->GetId());
NodeExecutorManager::ExecutorType executor_type = NodeExecutorManager::GetInstance().ResolveExecutorType(*node);
new_node->is_profiling_report = (executor_type == NodeExecutorManager::ExecutorType::AICORE) ||
(executor_type == NodeExecutorManager::ExecutorType::AICPU_TF) ||
@@ -279,10 +277,10 @@ Status HybridModelBuilder::ParseForceInfershapeNodes(const NodePtr &node, NodeIt
}

Status HybridModelBuilder::ParseDependentInputNodes(NodeItem &node_item, const std::vector<string> &dependencies) {
std::set<NodePtr> dependent_input_nodes;
std::set<NodePtr> dependent_for_shape_inference;
std::set<NodePtr> dependent_for_execution;
auto &ge_node = node_item.node;
bool is_hccl_op =
NodeExecutorManager::GetInstance().ResolveExecutorType(*ge_node) == NodeExecutorManager::ExecutorType::HCCL;
bool is_hccl_op = node_item.IsHcclOp();

// The input tensors become valid after computation is done for parent nodes of type DEPEND_COMPUTE.
// Wait for these parent nodes before execution.
@@ -297,29 +295,15 @@ Status HybridModelBuilder::ParseDependentInputNodes(NodeItem &node_item, const s
auto src_node_item = MutableNodeItem(src_node);
GE_CHECK_NOTNULL(src_node_item);

if (is_hccl_op) {
GELOGD("[%s] Add input data dependent node [%s] due to engine type is HCCL",
node_item.NodeName().c_str(),
src_node_item->NodeName().c_str());
src_node_item->has_observer = true;
node_item.dependents_for_execution.emplace_back(src_node);
node_item.has_observer = true;
for (auto &dst_node : ge_node->GetOutNodes()) {
if (dst_node == nullptr) {
continue;
}

NodeItem *dst_node_item = nullptr;
GE_CHK_STATUS_RET_NOLOG(GetOrCreateNodeItem(dst_node, &dst_node_item));
dst_node_item->dependents_for_execution.emplace_back(ge_node);
}
} else if (src_node_item->shape_inference_type == DEPEND_COMPUTE) {
GELOGD("[%s] Add input data dependent node [%s] due to inference type = DEPEND_COMPUTE",
node_item.NodeName().c_str(),
src_node_item->NodeName().c_str());

if (src_node_item->shape_inference_type == DEPEND_COMPUTE || is_hccl_op || src_node_item->IsHcclOp()) {
GELOGD("[%s](%s) Add input data dependent node [%s](%s), shape inference type = %d",
ge_node->GetName().c_str(),
ge_node->GetType().c_str(),
src_node->GetName().c_str(),
src_node->GetType().c_str(),
static_cast<int>(src_node_item->shape_inference_type));
src_node_item->has_observer = true;
node_item.dependents_for_execution.emplace_back(src_node);
dependent_for_execution.emplace(src_node);
}

if (src_node_item->shape_inference_type == DEPEND_SHAPE_RANGE) {
@@ -327,22 +311,17 @@ Status HybridModelBuilder::ParseDependentInputNodes(NodeItem &node_item, const s
node_item.NodeName().c_str(),
src_node_item->NodeName().c_str());
src_node_item->has_observer = true;
dependent_input_nodes.emplace(src_node);
dependent_for_shape_inference.emplace(src_node);
}
}

// cond or branch need to be prepared before the execution of IF or CASE
if (node_item.node_type == IF || node_item.node_type == STATELESSIF || node_item.node_type == CASE) {
const auto &in_anchor = ge_node->GetInDataAnchor(0);
GE_CHECK_NOTNULL(in_anchor);
const auto &peer_anchor = in_anchor->GetPeerOutAnchor();
GE_CHECK_NOTNULL(peer_anchor);
auto src_node = peer_anchor->GetOwnerNode();
auto src_node = NodeUtils::GetInDataNodeByIndex(*ge_node, 0); // cond input
GE_CHECK_NOTNULL(src_node);
auto src_node_item = MutableNodeItem(src_node);
GE_CHECK_NOTNULL(src_node_item);
src_node_item->has_observer = true;
node_item.dependents_for_execution.emplace_back(src_node);
dependent_for_execution.emplace(src_node);
GELOGD("[%s] Dependent added from %s for control op's cond/branch",
node_item.NodeName().c_str(),
src_node_item->NodeName().c_str());
@@ -366,24 +345,32 @@ Status HybridModelBuilder::ParseDependentInputNodes(NodeItem &node_item, const s
GE_CHECK_NOTNULL(src_node);
auto src_node_item = MutableNodeItem(src_node);
src_node_item->to_const_output_id_list.emplace(peer_out_anchor->GetIdx());
src_node_item->has_observer = true;

dependent_input_nodes.emplace(src_node);
dependent_for_shape_inference.emplace(src_node);
GELOGD("[%s] Dependent added from output of [%s:%d]",
node_item.NodeName().c_str(),
src_node_item->NodeName().c_str(),
peer_out_anchor->GetIdx());
}

for (const auto &dep_node : dependent_input_nodes) {
GE_CHK_STATUS_RET(ParseDependentForFusedSubgraph(node_item, dependent_for_shape_inference));
for (const auto &dep_node : dependent_for_shape_inference) {
auto src_node_item = MutableNodeItem(dep_node);
GE_CHECK_NOTNULL(src_node_item);
src_node_item->has_observer = true;
node_item.dependents_for_shape_inference.emplace_back(dep_node);
}

GE_CHK_STATUS_RET(ParseDependentForFusedSubgraph(node_item));
for (const auto &dep_node : dependent_for_execution) {
auto src_node_item = MutableNodeItem(dep_node);
GE_CHECK_NOTNULL(src_node_item);
src_node_item->has_observer = true;
node_item.dependents_for_execution.emplace_back(dep_node);
}

return SUCCESS;
}

Status HybridModelBuilder::ParseDependentForFusedSubgraph(NodeItem &node_item) {
Status HybridModelBuilder::ParseDependentForFusedSubgraph(NodeItem &node_item, std::set<ge::NodePtr> &dependencies) {
if (node_item.fused_subgraph == nullptr) {
return SUCCESS;
}
@@ -413,17 +400,12 @@ Status HybridModelBuilder::ParseDependentForFusedSubgraph(NodeItem &node_item) {
node_item.NodeName().c_str(),
op_desc->GetName().c_str(),
src_node_item->NodeName().c_str());
src_node_item->has_observer = true;
src_node_item->to_const_output_id_list.emplace(peer_out_anchor->GetIdx());

auto &depends = node_item.dependents_for_shape_inference;
if (std::find(depends.begin(), depends.end(), src_node) == depends.end()) {
depends.emplace_back(src_node);
GELOGD("[%s] Dependent added from output of [%s:%d]",
node_item.NodeName().c_str(),
src_node_item->NodeName().c_str(),
peer_out_anchor->GetIdx());
}
dependencies.emplace(src_node);
GELOGD("[%s] Dependent added from output of [%s:%d]",
node_item.NodeName().c_str(),
src_node_item->NodeName().c_str(),
peer_out_anchor->GetIdx());
}

return SUCCESS;
@@ -770,9 +752,23 @@ Status HybridModelBuilder::LoadGraph() {
GELOGI("After merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu",
root_graph->GetDirectNodesSize(),
root_graph->GetAllNodesSize());
GE_DUMP(root_graph, "hybrid_merged_graph");
}

root_graph_ = root_graph;
// Reset node id by topological order across all subgraphs
int64_t index = 0;
for (const auto &node : root_graph->GetAllNodes()) {
GE_CHECK_NOTNULL(node);
auto parent_graph = node->GetOwnerComputeGraph();
// No need to update nodes in known subgraph
if (parent_graph != nullptr && !parent_graph->GetGraphUnknownFlag()) {
continue;
}
auto op_desc = node->GetOpDesc();
GE_CHECK_NOTNULL(op_desc);
op_desc->SetId(index++);
}
GE_DUMP(root_graph, "hybrid_merged_graph");
GE_CHK_STATUS_RET(LoadDynamicSubgraph(*root_graph, true), "Failed to load root graph.");
GELOGD("Done loading root graph successfully.");
GE_CHK_STATUS_RET(hybrid_model_.root_graph_item_->GroupNodes(), "Failed to group nodes for root graph");
@@ -810,6 +806,7 @@ Status HybridModelBuilder::LoadGraph() {
}
}

GE_CHK_STATUS_RET(ParseDependentByParallelGroup(), "Failed to establish dependencies for hccl ops");
GELOGI("Done loading all subgraphs successfully.");
return SUCCESS;
}
@@ -1075,25 +1072,38 @@ Status HybridModelBuilder::InitWeights() {
return SUCCESS;
}

Status HybridModelBuilder::LoadTask(NodeItem &node_item) {
auto &node_ptr = node_item.node;
GELOGD("[%s] Start to build kernel task", node_ptr->GetName().c_str());
auto load_ret = node_item.node_executor->LoadTask(hybrid_model_,
node_ptr,
node_item.kernel_task);
if (load_ret != UNSUPPORTED && load_ret != SUCCESS) {
GELOGE(load_ret, "[%s] Failed to load task", node_ptr->GetName().c_str());
return load_ret;
}

GELOGD("[%s] Done loading task successfully.", node_ptr->GetName().c_str());
return SUCCESS;
}

Status HybridModelBuilder::LoadTasks() {
GE_CHK_STATUS_RET(CheckAicpuOpList(), "Check Aicpu op failed.");
std::map<int64_t, NodeItem *> ordered_partitioned_calls;
for (auto &it : hybrid_model_.node_items_) {
auto &node_item = it.second;
auto &node_ptr = node_item->node;
if (node_item->node_type == NETOUTPUT) {
continue;
}

GELOGD("[%s] Start to build kernel task", node_ptr->GetName().c_str());
auto load_ret = node_item->node_executor->LoadTask(hybrid_model_,
node_ptr,
node_item->kernel_task);
if (load_ret != UNSUPPORTED && load_ret != SUCCESS) {
GELOGE(load_ret, "[%s] Failed to load task", node_ptr->GetName().c_str());
return load_ret;
if (node_item->node_type == PARTITIONEDCALL) {
ordered_partitioned_calls.emplace(node_item->node_id, node_item.get());
}
GE_CHK_STATUS_RET_NOLOG(LoadTask(*node_item));
}

GELOGD("[%s] Done loading task successfully.", node_ptr->GetName().c_str());
// HCCL operators need to be loaded in the same order across different processes
for (auto &it : ordered_partitioned_calls) {
GE_CHK_STATUS_RET_NOLOG(LoadTask(*it.second));
}

return SUCCESS;
@@ -1905,6 +1915,7 @@ Status HybridModelBuilder::LoadDynamicSubgraph(ComputeGraph &graph, bool is_root
NodeItem *node_item = nullptr;
GE_CHK_STATUS_RET_NOLOG(GetOrCreateNodeItem(node, &node_item));
GE_CHK_STATUS_RET_NOLOG(BuildNodeItem(node, *node_item));
GE_CHK_STATUS_RET_NOLOG(CollectParallelGroups(node_item));
GE_CHK_STATUS_RET_NOLOG(UpdateAnchorStatus(node)); // needed by FE generate task

node_item->input_start = input_start;
@@ -2011,5 +2022,90 @@ Status HybridModelBuilder::CheckAicpuOpList() {
"Launch check aicpu op type failed.");
return SUCCESS;
}

Status HybridModelBuilder::CollectParallelGroups(NodeItem *node_item) {
const auto &node = node_item->node;
auto executor_type = NodeExecutorManager::GetInstance().ResolveExecutorType(*node);
if (executor_type == NodeExecutorManager::ExecutorType::HCCL) {
std::string parallel_group;
if (AttrUtils::GetStr(node->GetOpDesc(), ATTR_NAME_PARALLEL_GROUP, parallel_group)) {
GELOGD("[%s] Got parallel group = %s", node_item->NodeName().c_str(), parallel_group.c_str());
parallel_group_to_nodes_[parallel_group].emplace(node_item);
std::set<std::string> group{parallel_group};
node_to_parallel_groups_[node_item].emplace(parallel_group);
}
} else if (executor_type == NodeExecutorManager::ExecutorType::COMPILED_SUBGRAPH) {
std::set<std::string> parallel_groups;
GELOGD("[%s] Parse parallel group for known-shaped subgraph", node_item->NodeName().c_str());
for (const auto &subgraph_name : node->GetOpDesc()->GetSubgraphInstanceNames()) {
GELOGD("[%s] Start to get parallel group from subgraph: %s",
node_item->NodeName().c_str(),
subgraph_name.c_str());
auto subgraph = root_graph_->GetSubgraph(subgraph_name);
GE_CHECK_NOTNULL(subgraph);
for (const auto &sub_node : subgraph->GetAllNodes()) {
std::string parallel_group;
if (AttrUtils::GetStr(sub_node->GetOpDesc(), ATTR_NAME_PARALLEL_GROUP, parallel_group)) {
GELOGD("[%s::%s] Got parallel group = %s",
subgraph_name.c_str(),
sub_node->GetName().c_str(),
parallel_group.c_str());
parallel_groups.emplace(parallel_group);
}
}
}

if (!parallel_groups.empty()) {
for (const auto &parallel_group : parallel_groups) {
parallel_group_to_nodes_[parallel_group].emplace(node_item);
GELOGD("[%s] has parallel group: %s", node_item->NodeName().c_str(), parallel_group.c_str());
}
node_to_parallel_groups_.emplace(node_item, std::move(parallel_groups));
}
}

return SUCCESS;
}

Status HybridModelBuilder::ParseDependentByParallelGroup() {
for (const auto &it : node_to_parallel_groups_) {
auto node_item = it.first;
auto dst_engine_type = NodeExecutorManager::GetInstance().ResolveExecutorType(*node_item->node);
for (const auto &parallel_group : it.second) {
auto &dependent_nodes = parallel_group_to_nodes_[parallel_group];
NodeItem *nearest_dep_node = nullptr;
int max_id = -1;
for (auto &dep_node : dependent_nodes) {
if (node_item == dep_node) {
continue;
}
auto src_engine_type = NodeExecutorManager::GetInstance().ResolveExecutorType(*dep_node->node);
if (src_engine_type == dst_engine_type) {
continue;
}

if (dep_node->node_id < node_item->node_id && dep_node->node_id > max_id) {
nearest_dep_node = dep_node;
max_id = dep_node->node_id;
}
}

if (nearest_dep_node != nullptr) {
GELOGD("Add dependency for nodes of same parallel group[%s], src = [%s], dst = [%s]",
parallel_group.c_str(),
nearest_dep_node->NodeName().c_str(),
node_item->NodeName().c_str());
auto &deps = node_item->dependents_for_execution;
if (std::find(deps.begin(), deps.end(), nearest_dep_node->node) != deps.end()) {
GELOGD("Already has dependency, skip it");
continue;
}
nearest_dep_node->has_observer = true;
deps.emplace_back(nearest_dep_node->node);
}
}
}
return SUCCESS;
}
} // namespace hybrid
} // namespace ge
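Two ordering guarantees come out of this file: node ids are reassigned in topological order across the merged root graph, and PARTITIONEDCALL node items are collected into a std::map keyed by that id so their tasks (and the HCCL kernels inside them) are loaded in the same order in every process. A reduced illustration of the second part, not the builder itself:

// std::map iterates keys in ascending order, giving a process-independent load order.
std::map<int64_t, NodeItem *> ordered_partitioned_calls;
// ... filled while walking hybrid_model_.node_items_ in LoadTasks() ...
for (auto &it : ordered_partitioned_calls) {
  GE_CHK_STATUS_RET_NOLOG(LoadTask(*it.second));  // replayed in ascending node_id order
}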

+ 7  - 2   ge/hybrid/model/hybrid_model_builder.h

@@ -57,14 +57,17 @@ class HybridModelBuilder {
Status ValidateParams();
Status LoadGraph();
Status LoadGeModel(ComputeGraph &graph, const GeModelPtr &ge_model);
Status LoadTask(NodeItem &node_item);
Status LoadTasks();
Status IdentifyVariableOutputs(NodeItem &node_item);
Status IdentifySameInputs(NodeItem &node_item);
Status BuildNodeItem(const NodePtr &node, NodeItem &node_item);
Status GetOrCreateNodeItem(const NodePtr &node, NodeItem **node_item);
Status ParseForceInfershapeNodes(const NodePtr &node, NodeItem &node_item);
Status CollectParallelGroups(NodeItem *node_item);
Status ParseDependentInputNodes(NodeItem &node_item, const std::vector<string> &dependencies);
Status ParseDependentForFusedSubgraph(NodeItem &node_item);
Status ParseDependentForFusedSubgraph(NodeItem &node_item, std::set<ge::NodePtr> &dependencies);
Status ParseDependentByParallelGroup();
Status IndexTaskDefs();
Status IndexTaskDefs(const ComputeGraphPtr &sub_graph, const GeModelPtr &ge_model);
Status IndexSpecialNodes();
@@ -97,12 +100,14 @@ class HybridModelBuilder {
NodeItem *MutableNodeItem(const NodePtr &node);

GeRootModelPtr ge_root_model_;
ComputeGraphPtr root_graph_;
std::map<std::string, GeModelPtr> subgraph_models_;
std::map<std::string, NodePtr> constant_op_nodes_;
std::map<std::string, std::set<NodeItem *>> parallel_group_to_nodes_;
std::map<NodeItem *, std::set<std::string>> node_to_parallel_groups_;

HybridModel &hybrid_model_;
std::map<NodePtr, std::vector<std::pair<int, NodePtr>>> node_ref_inputs_;
int node_index = 0;

RuntimeParam &runtime_param_;
VarManager *var_manager_ = nullptr;


+ 4  - 0   ge/hybrid/model/node_item.cc

@@ -251,6 +251,10 @@ bool NodeItem::IsControlOp() const {
return ge::hybrid::IsControlOp(op_desc->GetType());
}

bool NodeItem::IsHcclOp() const {
return NodeExecutorManager::GetInstance().ResolveExecutorType(*node) == NodeExecutorManager::ExecutorType::HCCL;
}

std::string NodeItem::DebugString() const {
std::stringstream ss;
ss << "Node: ";


+ 2  - 0   ge/hybrid/model/node_item.h

@@ -67,6 +67,8 @@ struct NodeItem {

bool IsControlOp() const;

bool IsHcclOp() const;

void SetToDynamic();

std::string DebugString() const;


+ 23  - 20   ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc

@@ -95,13 +95,6 @@ Status KnownNodeTask::UpdateArgs(TaskContext &context) {
Status KnownNodeTask::Init(TaskContext &context) {
// allocate output mem
GE_CHK_STATUS_RET(context.AllocateOutputs(), "known node task allocate output failed.");

// init davinicmodel
if (!load_flag_) {
davinci_model_->InitRuntimeParams();
GE_CHK_STATUS_RET(davinci_model_->InitVariableMem(), "init variable mem failed.");
}

// allocate mem base
void *buffer = nullptr;
if (davinci_model_->TotalMemSize() != 0) {
@@ -129,23 +122,31 @@ Status KnownNodeTask::Init(TaskContext &context) {
void *global_step = context.GetExecutionContext()->global_step;
davinci_model_->SetKnownShapeGlobalStep(global_step);
}
int32_t device_id = 0;
rtError_t rt_ret = rtGetDevice(&device_id);
if (rt_ret != RT_ERROR_NONE || device_id < 0) {
GELOGE(rt_ret, "Call rtGetDevice failed, ret = 0x%X, device_id = %d.", rt_ret, device_id);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
davinci_model_->SetDeviceId(device_id);
GE_CHK_STATUS_RET(davinci_model_->Init(), "KnownNodeExecutor::InitDavinciModel failed.");
load_flag_ = true;
} else {
GE_CHK_STATUS_RET(ModelManager::GetInstance()->DestroyAicpuKernel(davinci_model_->GetSessionId(),
davinci_model_->Id(), davinci_model_->SubModelId()), "KnownNodeTask::Init destroy aicpu kernel failed.");
}
GE_CHK_STATUS_RET(ModelManager::GetInstance()->DestroyAicpuKernel(davinci_model_->GetSessionId(),
davinci_model_->Id(), davinci_model_->SubModelId()),
"KnownNodeTask::Init destroy aicpu kernel failed.");
GELOGI("[%s] KnownNodeExecutor::Init success.", context.GetNodeName());
return SUCCESS;
}

Status KnownNodeTask::InitDavinciModel() {
GELOGD("[Init][Model] start");
davinci_model_->InitRuntimeParams();
GE_CHK_STATUS_RET(davinci_model_->InitVariableMem(), "init variable mem failed");
int32_t device_id = 0;
GE_CHK_RT_RET(rtGetDevice(&device_id));
davinci_model_->SetDeviceId(static_cast<uint32_t>(device_id));
GE_CHK_STATUS_RET(DoInitDavinciModel(), "[Init][Model] Failed to init davinci model.");
GELOGD("[Init][Model] success");
return SUCCESS;
}

Status KnownNodeTask::DoInitDavinciModel() {
return davinci_model_->Init();
}

Status KnownNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const {
GELOGD("[%s] KnownNodeExecutor::PrepareTask in.", context.GetNodeName());
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[KnownNodeExecutorPrepareTask] Start");
@@ -182,9 +183,11 @@ Status KnownNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node

GE_CHK_STATUS_RET(davinci_model->Assign(ge_model), "KnownNodeExecutor::LoadTask davincimodel assign failed.");

task = MakeShared<KnownNodeTask>(davinci_model);
GE_CHECK_NOTNULL(task);
auto known_node_task = MakeShared<KnownNodeTask>(davinci_model);
GE_CHECK_NOTNULL(known_node_task);
GE_CHK_STATUS_RET_NOLOG(known_node_task->InitDavinciModel());
GELOGI("[%s] KnownNodeExecutor::LoadTask success.", node->GetName().c_str());
task = std::move(known_node_task);
return SUCCESS;
}



+ 5
- 3
ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h View File

@@ -31,11 +31,15 @@ class KnownNodeTask : public NodeTask {
: davinci_model_(davinci_model)
{}

~KnownNodeTask() {}
~KnownNodeTask() = default;

Status UpdateArgs(TaskContext &context) override;
Status ExecuteAsync(TaskContext &context, std::function<void()> done_callback) override;
Status Init(TaskContext &context) override;
Status InitDavinciModel();

protected:
virtual Status DoInitDavinciModel();
private:
std::shared_ptr<DavinciModel> davinci_model_ = nullptr;
bool load_flag_ = false;
@@ -47,8 +51,6 @@ class KnownNodeExecutor : public NodeExecutor {
Status PrepareTask(NodeTask &task, TaskContext &context) const;
Status ExecuteTask(NodeTask &task, TaskContext &context, const std::function<void()> &callback) const;
~KnownNodeExecutor() {}
private:
std::shared_ptr<DavinciModel> davinci_model_ = nullptr;
};
} // namespace hybrid
} // namespace ge
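
The split of model initialization into a public InitDavinciModel and a protected virtual DoInitDavinciModel is what allows the heavyweight DavinciModel::Init call to be replaced in tests (see the gmock-based unit test added below). A minimal sketch of a hand-written test double under that assumption; FakeKnownNodeTask is a hypothetical helper name, not part of this change:

// Hedged sketch: override the protected hook so InitDavinciModel can run
// without touching a real device. Uses only the KnownNodeTask interface shown above.
#include "hybrid/node_executor/compiledsubgraph/known_node_executor.h"

class FakeKnownNodeTask : public ge::hybrid::KnownNodeTask {
 public:
  using KnownNodeTask::KnownNodeTask;  // reuse the davinci_model constructor

 protected:
  ge::Status DoInitDavinciModel() override {
    return ge::SUCCESS;  // skip the real DavinciModel::Init during tests
  }
};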


+ 3
- 1
ge/offline/main.cc View File

@@ -244,9 +244,11 @@ class GFlagUtils {
" --framework Framework type. 0:Caffe; 1:MindSpore; 3:Tensorflow; 5:Onnx\n"
" --input_format Format of input data. E.g.: \"NCHW\"\n"
" --input_shape Shape of input data. Separate multiple nodes with semicolons (;). "
" --input_shape_range Shape range of input data. Separate multiple nodes with semicolons (;)."
"Use double quotation marks (\") to enclose each argument.\n"
" E.g.: \"input_name1:n1,c1,h1,w1;input_name2:n2,c2,h2,w2\"\n"
" --input_shape_range Shape range of input data. Separate multiple nodes with semicolons (;)."
"Use double quotation marks (\") to enclose each argument.\n"
" E.g.: \"input_name1:[n1~n2,c1,h1,w1];input_name2:[n2,c2~c3,h2,w2]\"\n"
" --dynamic_batch_size Set dynamic batch size. E.g.: \"batchsize1,batchsize2,batchsize3\"\n"
" --dynamic_image_size Set dynamic image size. Separate multiple nodes with semicolons (;). "
"Use double quotation marks (\") to enclose each argument.\n"


+ 9
- 1
inc/external/ge/ge_api_types.h View File

@@ -110,6 +110,7 @@ const char *const SAVE_ORIGINAL_MODEL = "ge.saveOriginalModel";
const char *const ORIGINAL_MODEL_FILE = "ge.originalModelFile";
const char *const INPUT_FP16_NODES = "ge.INPUT_NODES_SET_FP16";
const char *const OP_DEBUG_LEVEL = "ge.opDebugLevel";
const char *const PERFORMANCE_MODE = "ge.performance_mode";
} // namespace configure_option
// Configure stream num by Session constructor options param,
// its value should be int32_t type, default value is "1"
@@ -314,6 +315,11 @@ const std::string HCOM_MULTI_MODE = "ge.hcomMultiMode";
// atc and ir option
const char *const INPUT_SHAPE_RANGE = "input_shape_range";

// Configure express high compile performance or high execute performance
// normal: no need to compile, use saved .o files directly
// high: need to recompile, high execution performance mode
const std::string PERFORMANCE_MODE = "ge.performance_mode";

// Graph run mode
enum GraphRunMode { PREDICTION = 0, TRAIN };

@@ -388,6 +394,7 @@ static const char *const MDL_BANK_PATH = ge::MDL_BANK_PATH_FLAG.c_str();
static const char *const OP_BANK_PATH = ge::OP_BANK_PATH_FLAG.c_str();
static const char *const OP_BANK_UPDATE = ge::OP_BANK_UPDATE_FLAG.c_str();
static const char *const OP_DEBUG_LEVEL = ge::OP_DEBUG_LEVEL.c_str();
static const char *const PERFORMANCE_MODE = ge::PERFORMANCE_MODE.c_str();

// for interface: aclgrphBuildModel
#ifdef __GNUC__
@@ -412,7 +419,8 @@ const std::set<std::string> ir_builder_suppported_options = {INPUT_FORMAT,
OP_COMPILER_CACHE_MODE,
MDL_BANK_PATH,
OP_BANK_PATH,
OP_BANK_UPDATE};
OP_BANK_UPDATE,
PERFORMANCE_MODE};

// for interface: aclgrphParse
const std::set<std::string> ir_parser_suppported_options = {
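
Since ge.performance_mode is now accepted by the IR build path, a caller can pass it together with the other supported build options. A minimal sketch, assuming the usual std::map<std::string, std::string> options container; the surrounding build entry point is not part of this change:

// Hedged sketch: selecting the compile/execute performance trade-off.
// The key string comes from the PERFORMANCE_MODE constant added above;
// "normal" reuses saved .o files, "high" recompiles for faster execution.
#include <map>
#include <string>

std::map<std::string, std::string> BuildOptionsWithPerformanceMode() {
  std::map<std::string, std::string> options;
  options["ge.performance_mode"] = "high";
  return options;
}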


+ 3
- 0
inc/framework/common/debug/log.h View File

@@ -19,6 +19,7 @@

#include <string>
#include <sstream>
#include <securec.h>

#include "runtime/rt.h"
#include "common/string_util.h"
@@ -105,6 +106,7 @@
do { \
bool b = (expr); \
if (!b) { \
REPORT_INNER_ERROR("E19999", __VA_ARGS__); \
GELOGE(_status, __VA_ARGS__); \
return _status; \
} \
@@ -193,6 +195,7 @@
{ \
bool b = (expr); \
if (b) { \
REPORT_INNER_ERROR("E19999", __VA_ARGS__); \
DOMI_LOGE(__VA_ARGS__); \
exec_expr; \
return _status; \


+ 3
- 2
inc/framework/common/ge_types.h View File

@@ -67,8 +67,9 @@ struct DataBuffer {
void *data; // Data address
uint64_t length; // Data length
bool isDataSupportMemShare = false;
DataBuffer(void *dataIn, uint64_t len, bool isSupportMemShare)
: data(dataIn), length(len), isDataSupportMemShare(isSupportMemShare) {}
uint32_t placement = 0;
DataBuffer(void *dataIn, uint64_t len, bool isSupportMemShare, uint32_t placement = 0)
: data(dataIn), length(len), isDataSupportMemShare(isSupportMemShare), placement(placement) {}

DataBuffer() : data(nullptr), length(0), isDataSupportMemShare(false) {}
};
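
Because the new placement parameter is defaulted, existing call sites keep compiling unchanged. A minimal sketch of both constructor forms; the meaning of placement values is not specified in this diff, so the literal below is only illustrative:

// Hedged sketch: constructing DataBuffer with and without an explicit placement.
#include <cstdint>
#include "framework/common/ge_types.h"  // DataBuffer definition (path as in this repo's inc/ layout)

void DataBufferExample(void *dev_addr, uint64_t len) {
  ge::DataBuffer plain(dev_addr, len, false);       // placement defaults to 0
  ge::DataBuffer placed(dev_addr, len, false, 1U);  // explicit placement value (illustrative)
  (void)plain;
  (void)placed;
}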


+ 4
- 0
inc/framework/generator/ge_generator.h View File

@@ -76,10 +76,13 @@ class GE_FUNC_VISIBILITY GeGenerator {
/// @param [in] inputs: input tensors.
/// @param [in] outputs: output tensors.
/// @param [in] engine_type: engine type.
/// @param [in] compile_flag: op build flag, accurate build is 0, fuzz build is 1
/// @param [out] model_buff: model buff of op.
/// @return SUCCESS or FAILED
Status BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
OpEngineType engine_type, ModelBufferData &model_buff);
Status BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
OpEngineType engine_type, int32_t compile_flag, ModelBufferData &model_buff);
///
/// @ingroup ge
/// @brief: Build single Op into model buff.
@@ -99,6 +102,7 @@ class GE_FUNC_VISIBILITY GeGenerator {
const string &model_file_name, OpEngineType engine_type, ModelBufferData &model_buff,
bool is_offline = true);
bool CheckNoAicore(const ComputeGraphPtr &graph);
void RemoveConst(const vector<GeTensor> &inputs, vector<GeTensor> &outputs);
Status CheckForSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs);

using GeRootModelPtr = std::shared_ptr<ge::GeRootModel>;


+ 1
- 1
metadef

@@ -1 +1 @@
Subproject commit ac0de0213755e49360a9467eb5b13e13a752a35b
Subproject commit ccfccb4bb355425cc09594b8ea267fb8ca938138

+ 1
- 1
parser

@@ -1 +1 @@
Subproject commit eff7e2ecc54ef7887581acd6ea66356de6872e3a
Subproject commit 0d4703aa893e90f23ba8a2dbd8903e028680213f

+ 6
- 0
tests/depends/error_manager/src/error_manager_stub.cc View File

@@ -18,6 +18,12 @@

using namespace ErrorMessage;

namespace ErrorMessage {
int FormatErrorMessage(char *str_dst, size_t dst_max, const char *format, ...) {
return 1;
}
}

thread_local Context ErrorManager::error_context_ = {0, "", "", ""};

ErrorManager &ErrorManager::GetInstance() {


+ 4
- 0
tests/depends/runtime/src/runtime_stub.cc View File

@@ -435,3 +435,7 @@ rtError_t rtGetTaskIdAndStreamID(uint32_t *taskId, uint32_t *streamId)
rtError_t rtDebugRegisterForStream(rtStream_t stream, uint32_t flag, const void *addr, uint32_t *streamId, uint32_t *taskId) {
return RT_ERROR_NONE;
}

rtError_t rtDebugUnRegisterForStream(rtStream_t stream) {
return RT_ERROR_NONE;
}

+ 1
- 1
tests/ut/common/graph/CMakeLists.txt View File

@@ -20,7 +20,7 @@ set(CMAKE_CXX_STANDARD 11)
set(PROTO_LIST
"${GE_CODE_DIR}/metadef/proto/om.proto"
"${GE_CODE_DIR}/metadef/proto/ge_ir.proto"
"${GE_CODE_DIR}/metadef/proto/proto_inner/ge_onnx.proto"
"${GE_CODE_DIR}/metadef/proto/onnx/ge_onnx.proto"
)

protobuf_generate(ge PROTO_SRCS PROTO_HDRS ${PROTO_LIST})


+ 6
- 1
tests/ut/ge/CMakeLists.txt View File

@@ -33,7 +33,7 @@ set(PROTO_LIST
"${GE_CODE_DIR}/metadef/proto/tensorflow/tensor_shape.proto"
"${GE_CODE_DIR}/metadef/proto/tensorflow/types.proto"
"${GE_CODE_DIR}/metadef/proto/tensorflow/node_def.proto"
"${GE_CODE_DIR}/metadef/proto/proto_inner/ge_onnx.proto"
"${GE_CODE_DIR}/metadef/proto/onnx/ge_onnx.proto"
)

protobuf_generate(ge PROTO_SRCS PROTO_HDRS ${PROTO_LIST})
@@ -670,6 +670,7 @@ set(PASS_TEST_FILES
"graph/passes/merge_pass_unittest.cc"
#"graph/passes/switch_pass_unittest.cc"
"graph/passes/switch_logic_remove_pass_unittest.cc"
"graph/passes/switch_dead_branch_elimination_unittest.cc"
"graph/passes/assert_pass_unittest.cc"
"graph/passes/dropout_pass_unittest.cc"
"graph/passes/unused_const_pass_unittest.cc"
@@ -736,6 +737,7 @@ set(KERNEL_TEST_FILES
"graph/passes/folding_kernel/gather_v2_kernel_unittest.cc"
"graph/passes/folding_kernel/slice_kernel_unittest.cc"
"graph/passes/folding_kernel/dynamic_stitch_kernel_unittest.cc"
"graph/passes/atomic_addr_clean_pass_unittest.cc"
)

set(MULTI_PARTS_TEST_FILES
@@ -764,7 +766,9 @@ set(MULTI_PARTS_TEST_FILES
"common/ge_format_util_unittest.cc"
"graph/variable_accelerate_ctrl_unittest.cc"
"graph/build/logical_stream_allocator_unittest.cc"
"graph/build/model_builder_unittest.cc"
"graph/build/mem_assigner_unittest.cc"
"graph/build/task_generator_unittest.cc"
"graph/preprocess/graph_preprocess_unittest.cc"
"graph/manager/hcom_util_unittest.cc"
"graph/manager/graph_caching_allocator_unittest.cc"
@@ -793,6 +797,7 @@ set(PROFILING_MNG_TEST_FILES

set(HYBRID_TEST_FILES
"hybrid/ge_hybrid_unittest.cc"
"hybrid/known_node_executor_unittest.cc"
)

set(OTHERS_TEST_FILES


+ 234
- 0
tests/ut/ge/common/format_transfer_hwcn_fractalz_unittest.cc View File

@@ -34427,6 +34427,240 @@ TEST_F(UtestFormatTransferHwcnFz, fp32_2c_2n_pad) {
}
}

TEST_F(UtestFormatTransferHwcnFz, fp16_1c_1n_with_groups) {
uint16_t data[1 * 1 * 1 * 2] = {19, 88};
uint16_t ret[1 * 1 * 16 * 16] ={19 , 0, 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 88, 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0,
0 , 0 , 0, 0 ,0 , 0, 0, 0 , 0 , 0 , 0, 0, 0 , 0 , 0, 0};
FormatTransferFractalZ transfer;
ge::Format old_format = FORMAT_FRACTAL_Z;
int32_t groups = 2;
ge::Format new_format = static_cast<ge::Format>(ge::GetFormatFromSub(old_format, groups));
TransArgs args{
reinterpret_cast<uint8_t *>(data), FORMAT_HWCN, new_format, std::vector<int64_t>({1, 1, 1, 2}),
std::vector<int64_t>({1, 1, 16, 16}), DT_FLOAT16};

TransResult result;
EXPECT_EQ(transfer.TransFormat(args, result), SUCCESS);
EXPECT_EQ(result.length, sizeof(ret) / sizeof(ret[0]) * 2);
for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) {
EXPECT_EQ((reinterpret_cast<uint16_t *>(result.data.get()))[i], ret[i]);
}
}

TEST_F(UtestFormatTransferHwcnFz, fp16_4c_8n_with_groups_02) {
uint16_t data[3 * 3 * 4 * 8] = {
11 , 99 , 68 , 2 , 14 , 59 , 24 , 100,
4 , 65 , 11 , 7 , 74 , 28 , 71 , 81,
94 , 63 , 80 , 7 , 95 , 29 , 92 , 76,
88 , 68 , 67 , 98 , 82 , 11 , 20 , 68,
36 , 17 , 15 , 89 , 31 , 8 , 51 , 49,
49 , 89 , 79 , 97 , 7 , 91 , 14 , 34,
55 , 40 , 85 , 59 , 31 , 35 , 41 , 89,
4 , 82 , 90 , 48 , 44 , 19 , 9 , 84,
100 , 43 , 7 , 94 , 4 , 91 , 67 , 16,
63 , 79 , 20 , 62 , 55 , 38 , 13 , 61,
98 , 99 , 44 , 0 , 97 , 42 , 65 , 80,
78 , 56 , 26 , 17 , 23 , 22 , 76 , 84,
34 , 88 , 38 , 57 , 37 , 77 , 46 , 28,
48 , 11 , 6 , 18 , 8 , 66 , 24 , 29,
7 , 72 , 34 , 79 , 99 , 14 , 75 , 62,
44 , 98 , 11 , 31 , 4 , 79 , 51 , 37,
84 , 3 , 89 , 74 , 68 , 85 , 17 , 93,
81 , 88 , 38 , 8 , 69 , 82 , 91 , 91,
45 , 42 , 7 , 96 , 81 , 96 , 39 , 35,
93 , 46 , 73 , 7 , 9 , 81 , 5 , 63,
35 , 30 , 27 , 42 , 20 , 52 , 36 , 91,
87 , 1 , 8 , 7 , 78 , 21 , 76 , 97,
52 , 18 , 55 , 57 , 95 , 67 , 3 , 69,
98 , 85 , 75 , 75 , 38 , 3 , 94 , 66,
92 , 27 , 9 , 39 , 5 , 21 , 4 , 48,
55 , 38 , 58 , 84 , 23 , 13 , 71 , 91,
99 , 58 , 58 , 16 , 86 , 45 , 63 , 97,
30 , 10 , 21 , 37 , 78 , 94 , 8 , 49,
18 , 52 , 67 , 65 , 78 , 82 , 74 , 35,
97 , 15 , 43 , 22 , 30 , 87 , 98 , 91,
22 , 88 , 83 , 63 , 79 , 63 , 42 , 74,
29 , 62 , 2 , 97 , 65 , 45 , 76 , 57,
71 , 65 , 0 , 69 , 76 , 41 , 58 , 98,
90 , 3 , 75 , 56 , 41 , 66 , 41 , 96,
44 , 87 , 61 , 26 , 62 , 57 , 49 , 29,
49 , 94 , 90 , 96 , 33 , 32 , 10 , 25};
uint16_t ret[9 * 1 * 16 * 16] ={
11 , 4 , 94 , 88 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 , 65 , 63 , 68 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
68 , 11 , 80 , 67 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2 , 7 , 7 , 98 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 14 , 74, 95, 82, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 59 , 28, 29, 11, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 24 , 71, 92, 20, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 ,100 , 81, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
36 , 49 , 55 , 4 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
17 , 89 , 40 , 82 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
15 , 79 , 85 , 90 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
89 , 97 , 59 , 48 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 31 , 7, 31, 44, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 8 , 91, 35, 19, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 51 , 14, 41, 9, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 49 , 34, 89, 84, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 , 63 , 98 , 78 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
43 , 79 , 99 , 56 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
7 , 20 , 44 , 26 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 , 62 , 0 , 17 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 4 , 55, 97, 23, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 91 , 38, 42, 22, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 67 , 13, 65, 76, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 16 , 61, 80, 84, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
34 , 48 , 7 , 44 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
88 , 11 , 72 , 98 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
38 , 6 , 34 , 11 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
57 , 18 , 79 , 31 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 37 , 8, 99, 4, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 77 , 66, 14, 79, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 46 , 24, 75, 51, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 28 , 29, 62, 37, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
84 , 81 , 45 , 93 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3 , 88 , 42 , 46 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
89 , 38 , 7 , 73 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
74 , 8 , 96 , 7 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 68 , 69, 81, 9, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 85 , 82, 96, 81, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 17 , 91, 39, 5, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 93 , 91, 35, 63, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
35 , 87 , 52 , 98 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
30 , 1 , 18 , 85 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
27 , 8 , 55 , 75 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
42 , 7 , 57 , 75 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 20 , 78, 95, 38, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 52 , 21, 67, 3, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 36 , 76, 3, 94, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 91 , 97, 69, 66, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
92 , 55 , 99 , 30 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
27 , 38 , 58 , 10 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
9 , 58 , 58 , 21 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
39 , 84 , 16 , 37 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 5 , 23, 86, 78, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 21 , 13, 45, 94, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 4 , 71, 63, 8, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 48 , 91, 97, 49, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
18 , 97 , 22 , 29 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
52 , 15 , 88 , 62 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
67 , 43 , 83 , 2 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
65 , 22 , 63 , 97 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 78 , 30, 79, 65, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 82 , 87, 63, 45, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 74 , 98, 42, 76, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 35 , 91, 74, 57, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
71 , 90 , 44 , 49 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
65 , 3 , 87 , 94 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 75 , 61 , 90 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
69 , 56 , 26 , 96 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 76 , 41, 62, 33, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 41 , 66, 57, 32, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 58 , 41, 49, 10, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 98 , 96, 29, 25, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
FormatTransferFractalZ transfer;
ge::Format old_format = FORMAT_FRACTAL_Z;
int32_t groups = 2;
ge::Format new_format = static_cast<ge::Format>(ge::GetFormatFromSub(old_format, groups));
TransArgs args{
reinterpret_cast<uint8_t *>(data), FORMAT_HWCN, new_format, std::vector<int64_t>({3, 3, 4, 8}),
std::vector<int64_t>({9, 1, 16, 16}), DT_FLOAT16};

TransResult result;
EXPECT_EQ(transfer.TransFormat(args, result), SUCCESS);
EXPECT_EQ(result.length, sizeof(ret) / sizeof(ret[0]) * 2);
for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) {
EXPECT_EQ((reinterpret_cast<uint16_t *>(result.data.get()))[i], ret[i]);
}
}


TEST_F(UtestFormatTransferHwcnFz, build_transfer_fp32) {
float data[5 * 5 * 31 * 17];
TransArgs args{


+ 27
- 0
tests/ut/ge/generator/ge_generator_unittest.cc View File

@@ -88,6 +88,24 @@ TEST_F(UtestGeGenerator, test_build_single_op_online) {
EXPECT_EQ(generator.BuildSingleOpModel(op_desc, inputs, outputs, ENGINE_AIVECTOR, model_buffer), FAILED);
}

TEST_F(UtestGeGenerator, test_singleop_fuzz_build) {
GeTensorDesc tensor_desc;
shared_ptr<OpDesc> op_desc = make_shared<OpDesc>("Add", "add");
op_desc->AddInputDesc(tensor_desc);
op_desc->AddInputDesc(tensor_desc);
op_desc->AddOutputDesc(tensor_desc);

GeTensor tensor(tensor_desc);
const vector<GeTensor> inputs = { tensor, tensor };
const vector<GeTensor> outputs = { tensor };

GeGenerator generator;
generator.Initialize({});
ModelBufferData model_buffer;
bool compile_flag = true;
EXPECT_EQ(generator.BuildSingleOpModel(op_desc, inputs, outputs, ENGINE_AIVECTOR, compile_flag, model_buffer), SUCCESS);
}

TEST_F(UtestGeGenerator, test_check_aicore) {
GeGenerator generator;
generator.Initialize({});
@@ -128,4 +146,13 @@ TEST_F(UtestGeGenerator, test_set_model_name) {
ge_root_model->root_graph_ = std::move(graph);
EXPECT_EQ(generator.SetModelNameForDump(ge_root_model), SUCCESS);
}

TEST_F(UtestGeGenerator, test_remove_const) {
GeGenerator generator;
GeTensorDesc tensor_desc;
GeTensor tensor(tensor_desc);
const vector<GeTensor> inputs = {tensor};
vector<GeTensor> outputs;
generator.RemoveConst(inputs, outputs);
}
} // namespace ge

+ 14
- 0
tests/ut/ge/graph/build/mem_assigner_unittest.cc View File

@@ -249,3 +249,17 @@ TEST_F(UtestMemoryAssignerTest, graph_memory_assign_continuous_input) {
EXPECT_EQ(addn1->GetOpDesc()->GetOutputOffset()[0], 500);
EXPECT_EQ(addn2->GetOpDesc()->GetOutputOffset()[0], 600);
}

TEST_F(UtestMemoryAssignerTest, graph_memory_set_last_used_attr) {
ge::ComputeGraphPtr graph = make_shared<ge::ComputeGraph>("");
MakeGraph(graph);
auto node_f = graph->FindNode("F");
MemoryAssigner memory_assigner(graph);
map<int64_t, size_t> mem_offset;
size_t zero_memory_size = 0;
EXPECT_EQ(memory_assigner.AssignMemory(false, mem_offset, zero_memory_size), GRAPH_SUCCESS);

int32_t flag = 0;
(void) ge::AttrUtils::GetInt(node_f->GetOpDesc()->GetInputDesc(0), ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE, flag);
EXPECT_EQ(flag, 1);
}

+ 146
- 0
tests/ut/ge/graph/build/model_builder_unittest.cc View File

@@ -0,0 +1,146 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>
#include <memory>

#include "graph/anchor.h"
#include "graph/attr_value.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/node_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
#include "omg/omg_inner_types.h"
#include "../passes/graph_builder_utils.h"

#define protected public
#define private public
#include "graph/build/model_builder.h"
#undef protected
#undef private

using namespace std;
using namespace testing;
using namespace ge;
using domi::GetContext;

class UtestModelBuilderTest : public testing::Test {
public:
ge::OpDescPtr CreateOpWithWsSize(const string &name, int64_t wsByte, const string &type = "some") {
ge::OpDescPtr op_def = make_shared<ge::OpDesc>(name, type);
auto desc_temp_ptr = make_shared<ge::GeTensorDesc>();
auto desc_temp = *desc_temp_ptr;

TensorUtils::SetSize(desc_temp, 1024);
op_def->AddInputDesc(desc_temp);
op_def->AddOutputDesc(desc_temp);

std::vector<int64_t> workspace_bytes;
workspace_bytes.push_back(wsByte);
op_def->SetWorkspaceBytes(workspace_bytes);
return op_def;
}
ge::OpDescPtr CreateRefOpWithWsSize(const string &name, int64_t wsByte, const string &type = "some") {
ge::OpDescPtr op_def = make_shared<ge::OpDesc>(name, type);
auto desc_temp_ptr = make_shared<ge::GeTensorDesc>();
auto desc_temp = *desc_temp_ptr;

TensorUtils::SetSize(desc_temp, 1024);
op_def->AddInputDesc(desc_temp);

auto desc_output_ptr = make_shared<ge::GeTensorDesc>();
auto desc_output = *desc_output_ptr;
TensorUtils::SetSize(desc_output, 6500);
ge::TensorUtils::SetReuseInput(desc_output, true);
ge::TensorUtils::SetReuseInputIndex(desc_output, 0);
op_def->AddOutputDesc(desc_output);

std::vector<int64_t> workspace_bytes;
workspace_bytes.push_back(wsByte);
op_def->SetWorkspaceBytes(workspace_bytes);
return op_def;
}
void MakeGraph(ge::ComputeGraphPtr &graph) {
ge::OpDescPtr op_def_a = CreateOpWithWsSize("A", 6000);
op_def_a->SetStreamId(0);
ge::OpDescPtr op_def_b = CreateOpWithWsSize("B", 120000);
op_def_b->SetStreamId(0);
ge::OpDescPtr op_def_c = CreateOpWithWsSize("C", 16000);
op_def_c->SetStreamId(1);
ge::OpDescPtr op_def_d = CreateOpWithWsSize("D", 24000);
op_def_d->SetStreamId(2);
ge::OpDescPtr op_def_e = CreateOpWithWsSize("E", 24000);
op_def_e->SetStreamId(3);
ge::OpDescPtr op_def_f = CreateOpWithWsSize("F", 30000);
op_def_f->SetStreamId(2);
ge::OpDescPtr op_def_g = CreateOpWithWsSize("G", 32000);
op_def_g->SetStreamId(3);
ge::OpDescPtr op_def_h = CreateOpWithWsSize("H", 48000);
op_def_h->SetStreamId(2);
ge::OpDescPtr op_def_i = CreateOpWithWsSize("I", 60000);
op_def_i->SetStreamId(2);
ge::OpDescPtr op_def_j = CreateOpWithWsSize("J", 256000, NETOUTPUT);
op_def_j->SetStreamId(3);

// add node
ge::NodePtr node_a = graph->AddNode(op_def_a);
ge::NodePtr node_b = graph->AddNode(op_def_b);
ge::NodePtr node_c = graph->AddNode(op_def_c);
ge::NodePtr node_d = graph->AddNode(op_def_d);
ge::NodePtr node_e = graph->AddNode(op_def_e);
ge::NodePtr node_f = graph->AddNode(op_def_f);
ge::NodePtr node_g = graph->AddNode(op_def_g);
ge::NodePtr node_h = graph->AddNode(op_def_h);
ge::NodePtr node_i = graph->AddNode(op_def_i);
ge::NodePtr node_j = graph->AddNode(op_def_j);

// add edge
ge::GraphUtils::AddEdge(node_a->GetOutDataAnchor(0), node_b->GetInDataAnchor(0));
ge::GraphUtils::AddEdge(node_a->GetOutDataAnchor(0), node_c->GetInDataAnchor(0));
ge::GraphUtils::AddEdge(node_b->GetOutDataAnchor(0), node_d->GetInDataAnchor(0));
ge::GraphUtils::AddEdge(node_b->GetOutDataAnchor(0), node_e->GetInDataAnchor(0));
ge::GraphUtils::AddEdge(node_c->GetOutDataAnchor(0), node_g->GetInDataAnchor(0));
ge::GraphUtils::AddEdge(node_d->GetOutDataAnchor(0), node_f->GetInDataAnchor(0));
ge::GraphUtils::AddEdge(node_e->GetOutDataAnchor(0), node_g->GetInDataAnchor(1));
ge::GraphUtils::AddEdge(node_f->GetOutDataAnchor(0), node_h->GetInDataAnchor(0));
ge::GraphUtils::AddEdge(node_g->GetOutDataAnchor(0), node_j->GetInDataAnchor(0));
ge::GraphUtils::AddEdge(node_h->GetOutDataAnchor(0), node_i->GetInDataAnchor(0));
ge::GraphUtils::AddEdge(node_i->GetOutDataAnchor(0), node_j->GetInDataAnchor(1));

GetContext().out_nodes_map["H"] = {0};
GetContext().out_nodes_map["I"] = {0};
GetContext().out_nodes_map["J"] = {0};
graph->TopologicalSorting();
}


protected:
void SetUp() {}

void TearDown() { GetContext().out_nodes_map.clear(); }
};

// when check GetMemoryRanges return fail, Assign return fail
TEST_F(UtestModelBuilderTest, SetInputIsConst) {
Graph2SubGraphInfoList subgraphs;
std::map<std::string, int> stream_max_parallel_num;
ge::ComputeGraphPtr graph = make_shared<ge::ComputeGraph>("");
MakeGraph(graph);
graph->TopologicalSorting();
ge::ModelBuilder builder(0, graph, subgraphs, stream_max_parallel_num, false);
EXPECT_EQ(builder.PreBuildModel(), SUCCESS);
}

+ 68
- 0
tests/ut/ge/graph/build/task_generator_unittest.cc View File

@@ -0,0 +1,68 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>
#include <memory>

#include "graph/anchor.h"
#include "graph/attr_value.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/node_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
#include "omg/omg_inner_types.h"
#include "../passes/graph_builder_utils.h"

#define protected public
#define private public
#include "graph/build/task_generator.h"
#undef protected
#undef private

using namespace std;
using namespace testing;
using namespace ge;

class UtestTaskGeneratorTest : public testing::Test {
public:
ge::ComputeGraphPtr BuildGraphFpProfiling() {
ge::ut::GraphBuilder builder("graph");
auto data = builder.AddNode("data", "phony", 1, 1);
auto addn1 = builder.AddNode("addn1", "AddN", 1, 1);
auto netoutput = builder.AddNode("netoutput", "NetOutput", 2, 0);
auto op_desc = data->GetOpDesc();
(void)AttrUtils::SetStr(op_desc, ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, "IteratorV2");
op_desc->SetOpKernelLibName("GE");
builder.AddDataEdge(data, 0, addn1, 0);
builder.AddDataEdge(addn1, 0, netoutput, 0);
return builder.GetGraph();
}

protected:
void SetUp() {}
void TearDown() {}
};

TEST_F(UtestTaskGeneratorTest, AutoFindFpOpIndex) {
auto graph = BuildGraphFpProfiling();
TaskGenerator task_generator(nullptr, 0);
ProfilingPoint profiling_point;
profiling_point.fp_index = -1;
EXPECT_EQ(task_generator.AutoFindFpOpIndex(graph, profiling_point), SUCCESS);
// addn1 is fp
EXPECT_EQ(profiling_point.fp_index, 2);
}

+ 48
- 0
tests/ut/ge/graph/load/davinci_model_unittest.cc View File

@@ -942,4 +942,52 @@ TEST_F(UtestDavinciModel, simple_test_gmock) {
EXPECT_EQ(mock_stub.func2(2, 5), 1023);
EXPECT_EQ(mock_stub.func2(3, 5), 1023);
}

TEST_F(UtestDavinciModel, NnExecute) {
DavinciModel model(0, nullptr);
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");
ProfilingManager::Instance().is_load_profiling_ = true;

GeModelPtr ge_model = make_shared<GeModel>();
ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, 10240);
AttrUtils::SetInt(ge_model, ATTR_MODEL_STREAM_NUM, 1);

shared_ptr<domi::ModelTaskDef> model_task_def = make_shared<domi::ModelTaskDef>();
ge_model->SetModelTaskDef(model_task_def);

GeTensorDesc tensor(GeShape({1,4,128,128}), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);
{
OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index = 0
}

{
OpDescPtr op_desc = CreateOpDesc("output", NETOUTPUT);
op_desc->AddInputDesc(tensor);
op_desc->SetInputOffset({5120});
op_desc->SetSrcName( { "memcpy" } );
op_desc->SetSrcIndex( { 0 } );
NodePtr node = graph->AddNode(op_desc); // op_index = 3
}

EXPECT_EQ(model.Assign(ge_model), SUCCESS);
EXPECT_EQ(model.Init(), SUCCESS);

rtStream_t stream = nullptr;
InputData input_data;
OutputData output_data;
vector<OutputTensorInfo> outputs;
EXPECT_EQ(model.GenOutputTensorInfo(&output_data, outputs), SUCCESS);
EXPECT_EQ(output_data.blobs.size(), 1);
EXPECT_EQ(outputs.size(), 1);
input_data.blobs = output_data.blobs;
EXPECT_EQ(input_data.blobs.size(), 1);
EXPECT_EQ(model.NnExecute(stream, false, input_data, output_data), SUCCESS);
}
} // namespace ge

+ 25
- 0
tests/ut/ge/graph/load/kernel_task_info_unittest.cc View File

@@ -496,6 +496,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_init_cce_task) {
KernelTaskInfo kernel_task_info;
domi::KernelDef *kernel_def = task_def.mutable_kernel();
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

kernel_def->set_flowtable("InitCceTask");
domi::KernelContext *context = kernel_def->mutable_context();
@@ -529,6 +530,7 @@ TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed1) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");

domi::KernelDef *kernel_def = task_def.mutable_kernel();
EXPECT_EQ(kernel_task_info.InitCceTask(*kernel_def), INTERNAL_ERROR);
@@ -546,6 +548,7 @@ TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed2) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
// KernelTaskInfo::SetContext -> SUCCESS
@@ -569,6 +572,7 @@ TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed3) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
// KernelTaskInfo::SetContext -> SUCCESS
@@ -594,6 +598,7 @@ TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed4) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
// KernelTaskInfo::SetContext -> SUCCESS
@@ -620,6 +625,7 @@ TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed5) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
// KernelTaskInfo::SetContext -> SUCCESS
@@ -647,6 +653,7 @@ TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed6) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
// KernelTaskInfo::SetContext -> SUCCESS
@@ -675,6 +682,7 @@ TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_cce_task_failed7) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
// KernelTaskInfo::SetContext -> SUCCESS
@@ -712,6 +720,7 @@ TEST_F(UtestKernelTaskInfo, success_kernel_taskInfo_init_set_context) {
context->set_args_count(1);
context->set_args_offset("args111111", 10);

kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");
EXPECT_EQ(kernel_task_info.SetContext(*kernel_def), SUCCESS);

EXPECT_EQ(kernel_task_info.Release(), SUCCESS);
@@ -733,6 +742,7 @@ TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_set_context_failed1) {
context->set_is_flowtable(true);
context->set_args_count(0);

kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");
EXPECT_EQ(kernel_task_info.SetContext(*kernel_def), INTERNAL_ERROR);

kernel_def->clear_context();
@@ -752,6 +762,8 @@ TEST_F(UtestKernelTaskInfo, kernel_taskInfo_init_set_context_failed2) {
context->set_args_count(5);
context->set_args_offset("\0\0"); // args_offset = 0

kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");

EXPECT_EQ(kernel_task_info.SetContext(*kernel_def), PARAM_INVALID);

kernel_def->clear_context();
@@ -769,6 +781,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_update_cce_args) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
domi::KernelContext *context = kernel_def->mutable_context();
@@ -815,6 +828,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_update_cce_args_failed1) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
domi::KernelContext *context = kernel_def->mutable_context();
@@ -856,6 +870,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_set_flowtable) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
domi::KernelContext *context = kernel_def->mutable_context();
@@ -887,6 +902,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_set_flowtable_failed1) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
domi::KernelContext *context = kernel_def->mutable_context();
@@ -911,6 +927,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_set_flowtable_failed2) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
domi::KernelContext *context = kernel_def->mutable_context();
@@ -935,6 +952,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_set_flowtable_failed3) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = model.op_list_[0];

domi::KernelDef *kernel_def = task_def.mutable_kernel();
domi::KernelContext *context = kernel_def->mutable_context();
@@ -988,6 +1006,7 @@ TEST_F(UtestKernelTaskInfo, success_distribute_dump_task) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");

domi::KernelDef *kernel_def = task_def.mutable_kernel();

@@ -1033,6 +1052,7 @@ TEST_F(UtestKernelTaskInfo, success_store_input_output_tensor) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");

std::vector<void *> input_data_addrs;
std::vector<void *> output_data_addrs;
@@ -1062,6 +1082,7 @@ TEST_F(UtestKernelTaskInfo, fail_release) {
domi::TaskDef task_def;
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");

std::vector<void *> input_data_addrs;
std::vector<void *> output_data_addrs;
@@ -1091,6 +1112,7 @@ TEST_F(UtestKernelTaskInfo, update_l2data_success) {
DavinciModel model(0, nullptr);
KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");
domi::KernelDef kernel_def;

EXPECT_EQ(kernel_task_info.UpdateL2Data(kernel_def), SUCCESS);
@@ -1168,6 +1190,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_update_args_te) {
KernelTaskInfo kernel_task_info;
kernel_task_info.kernel_type_ = ccKernelType::TE;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");
EXPECT_EQ(kernel_task_info.UpdateArgs(), SUCCESS);
}

@@ -1177,6 +1200,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_update_args_aicpu) {
KernelTaskInfo kernel_task_info;
kernel_task_info.kernel_type_ = ccKernelType::TE;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");
kernel_task_info.args_size_ = 120;
kernel_task_info.args_addr = std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[kernel_task_info.args_size_]);
kernel_task_info.io_addrs_ = { (void*)0x12345678, (void*)0x22345678 };
@@ -1191,6 +1215,7 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_super_kernel_info) {

KernelTaskInfo kernel_task_info;
kernel_task_info.davinci_model_ = &model;
kernel_task_info.op_desc_ = CreateOpDesc("FrameworkOp", "FrameworkOp");

EXPECT_EQ(kernel_task_info.SaveSuperKernelInfo(), SUCCESS);



+ 65
- 0
tests/ut/ge/graph/passes/atomic_addr_clean_pass_unittest.cc View File

@@ -0,0 +1,65 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>
#include "graph/passes/atomic_addr_clean_pass.h"
#include "common/op/ge_op_utils.h"
#include "common/types.h"
#include "graph/anchor.h"
#include "graph/attr_value.h"
#include "graph/compute_graph.h"
#include "graph/op_desc.h"
#include "graph/utils/attr_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
#include "inc/pass_manager.h"
using namespace testing;

namespace ge {
class UtestGraphPassesAtomicAddrCleanPass : public Test {
public:
UtestGraphPassesAtomicAddrCleanPass() {
graph_ = std::make_shared<ComputeGraph>("test");
}

NodePtr NewNode(const string &name, const string &type, int input_cnt, int output_cnt) {
OpDescPtr op_desc = std::make_shared<OpDesc>(name, type);
for (int i = 0; i < input_cnt; ++i) {
op_desc->AddInputDesc(GeTensorDesc());
}
for (int i = 0; i < output_cnt; ++i) {
op_desc->AddOutputDesc(GeTensorDesc());
}
NodePtr node = graph_->AddNode(op_desc);
return node;
}

ComputeGraphPtr graph_;
};

// node1 -> node2 -> node3
TEST_F(UtestGraphPassesAtomicAddrCleanPass, pass_run_success) {
auto node1 = NewNode("node1", DATA, 0, 1);
auto node2 = NewNode("node2", RELU, 1, 1);
auto node3 = NewNode("node3", NETOUTPUT, 1, 0);
GraphUtils::AddEdge(node1->GetOutDataAnchor(0), node2->GetInDataAnchor(0));
GraphUtils::AddEdge(node2->GetOutDataAnchor(0), node3->GetInDataAnchor(0));
AtomicAddrCleanPass atomi_addr_clean_pass;
Status ret = atomi_addr_clean_pass.Run(graph_);
EXPECT_EQ(ret, SUCCESS);
}
} // namespace ge

+ 163
- 0
tests/ut/ge/graph/passes/switch_dead_branch_elimination_unittest.cc View File

@@ -0,0 +1,163 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "common/ge_inner_error_codes.h"
#include "graph/passes/switch_dead_branch_elimination.h"
#include "graph_builder_utils.h"
namespace ge {
class UtestSwitchDeadBranchElimination : public testing::Test {
protected:
void SetUp() {}
void TearDown() {}
};
namespace {
/*
* data1 const1
* \ /
* case1
* |
* relu1
* |
* netoutput
*/
ut::GraphBuilder ParentGraphBuilder() {
ut::GraphBuilder builder = ut::GraphBuilder("g1");
auto data1 = builder.AddNode("data1", "Data", 0, 1);
auto const1 = builder.AddNode("const1", "Const", 0, 1);
auto case1 = builder.AddNode("case1", CASE, 2, 1);
auto relu1 = builder.AddNode("relu1", "Relu", 1, 1);
auto netoutput = builder.AddNode("netoutput", NETOUTPUT, 1, 0);
int32_t weight[1] = {1};
GeTensorDesc weight_desc(GeShape({1}), FORMAT_NHWC, DT_INT32);
GeTensorPtr tensor = std::make_shared<GeTensor>(weight_desc, (uint8_t *)weight, sizeof(weight));
OpDescUtils::SetWeights(const1, {tensor});
builder.AddDataEdge(data1, 0, case1, 0);
builder.AddDataEdge(const1, 0, case1, 1);
builder.AddDataEdge(case1, 0, relu1, 0);
builder.AddDataEdge(relu1, 0, netoutput, 0);
return builder;
}
/*
* data1 data2
* \ /
* switch
* / \
* relu1 relu2
* \ /
* merge
* |
* netoutput
*/
ut::GraphBuilder SwitchSubgraphBuilder(string graph_name, uint32_t num) {
ut::GraphBuilder builder = ut::GraphBuilder(graph_name);
string data1_name = "data1_" + std::to_string(num);
auto data1 = builder.AddNode(data1_name, "Data", 0, 1);
auto data1_desc = data1->GetOpDesc();
EXPECT_NE(data1_desc, nullptr);
AttrUtils::SetInt(data1_desc, "_parent_node_index", 0);
string data2_name = "data2_" + std::to_string(num);
auto data2 = builder.AddNode(data2_name, "Data", 0, 1);
auto data2_desc = data2->GetOpDesc();
EXPECT_NE(data2_desc, nullptr);
AttrUtils::SetInt(data2_desc, "_parent_node_index", 1);
string switch_name = "switch_" + std::to_string(num);
auto switch1 = builder.AddNode(switch_name, "Switch", 2, 2);
string relu1_name = "relu1_" + std::to_string(num);
auto relu1 = builder.AddNode(relu1_name, "Relu", 1, 1);
string relu2_name = "relu2_" + std::to_string(num);
auto relu2 = builder.AddNode(relu2_name, "Relu", 1, 1);
string merge_name = "merge_" + std::to_string(num);
auto merge = builder.AddNode(merge_name, "Merge", 2, 1);
string output_name = "output_" + std::to_string(num);
auto netoutput = builder.AddNode(output_name, NETOUTPUT, 1, 0);
builder.AddDataEdge(data1, 0, switch1, 0);
builder.AddDataEdge(data2, 0, switch1, 1);
builder.AddDataEdge(switch1, 0, relu1, 0);
builder.AddDataEdge(switch1, 1, relu2, 0);
builder.AddDataEdge(relu1, 0, merge, 0);
builder.AddDataEdge(relu2, 0, merge, 1);
builder.AddDataEdge(merge, 0, netoutput, 0);
return builder;
}
void AddCaseSubgraph(ComputeGraphPtr &parent_graph, uint32_t branch_num) {
auto case_node = parent_graph->FindNode("case1");
EXPECT_NE(case_node, nullptr);
for (uint32_t i = 0; i < branch_num; ++i) {
string name = "Branch_Graph_" + std::to_string(i);
auto builder_subgraph = SwitchSubgraphBuilder(name, i);
auto switch_subgraph = builder_subgraph.GetGraph();
case_node->GetOpDesc()->AddSubgraphName(switch_subgraph->GetName());
case_node->GetOpDesc()->SetSubgraphInstanceName(i, switch_subgraph->GetName());
switch_subgraph->SetParentNode(case_node);
switch_subgraph->SetParentGraph(parent_graph);
EXPECT_EQ(parent_graph->AddSubgraph(switch_subgraph->GetName(), switch_subgraph), GRAPH_SUCCESS);
}
}
} // namespace
TEST_F(UtestSwitchDeadBranchElimination, switch_dead_branch_elimination_across_case_success) {
auto builder = ParentGraphBuilder();
auto parent_graph = builder.GetGraph();
AddCaseSubgraph(parent_graph, 2);
auto subgraphs = parent_graph->GetAllSubgraphs();
EXPECT_EQ(subgraphs.size(), 2);
SwitchDeadBranchElimination switch_pass;
for (auto &subgraph : subgraphs) {
auto switch_node = subgraph->FindFirstNodeMatchType("Switch");
if (switch_node != nullptr) {
EXPECT_EQ(switch_pass.Run(switch_node), SUCCESS);
}
}
auto all_nodes = parent_graph->GetAllNodes();
EXPECT_EQ(all_nodes.size(), 17);
for (auto &subgraph : subgraphs) {
EXPECT_EQ(subgraph->GetDirectNode().size(), 6);
EXPECT_EQ(subgraph->FindFirstNodeMatchType("Switch"), nullptr);
auto merge_node = subgraph->FindFirstNodeMatchType("Merge");
EXPECT_NE(merge_node, nullptr);
auto merge_innode = merge_node->GetInDataNodes();
EXPECT_EQ(merge_innode.size(), 1);
}
}
} // namespace ge

+ 30
- 0
tests/ut/ge/graph/preprocess/graph_preprocess_unittest.cc View File

@@ -50,6 +50,28 @@ ComputeGraphPtr BuildGraph1(){
return builder.GetGraph();
}

ComputeGraphPtr BuildGraph2() {
auto builder = ut::GraphBuilder("g2");
auto data1 = builder.AddNode("data1", DATA, 1, 1, FORMAT_NCHW, DT_FLOAT, std::vector<int64_t>({22, -1}));
ge::AttrUtils::SetStr(data1->GetOpDesc(), ATTR_ATC_USER_DEFINE_DATATYPE, "DT_INT8");
auto data_opdesc = data1->GetOpDesc();
AttrUtils::SetInt(data_opdesc, ATTR_NAME_INDEX, 0);

data1->UpdateOpDesc(data_opdesc);
return builder.GetGraph();
}

ComputeGraphPtr BuildGraph3() {
auto builder = ut::GraphBuilder("g3");
auto data1 = builder.AddNode("data1", DATA, 1, 1, FORMAT_NCHW, DT_FLOAT);
ge::AttrUtils::SetStr(data1->GetOpDesc(), ATTR_ATC_USER_DEFINE_DATATYPE, "DT_INT8");
auto data_opdesc = data1->GetOpDesc();
AttrUtils::SetInt(data_opdesc, ATTR_NAME_INDEX, 0);

data1->UpdateOpDesc(data_opdesc);
return builder.GetGraph();
}

TEST_F(UtestGraphPreproces, test_dynamic_input_shape_parse) {
ge::GraphPrepare graph_prepare;
graph_prepare.compute_graph_ = BuildGraph1();
@@ -88,4 +110,12 @@ TEST_F(UtestGraphPreproces, test_check_user_input) {
Status ret = graph_prepare.CheckUserInput(user_input);
EXPECT_EQ(ret, GE_GRAPH_INIT_FAILED);
}

TEST_F(UtestGraphPreproces, test_update_input_output1) {
ge::GraphPrepare graph_prepare;
graph_prepare.compute_graph_ = BuildGraph3();

Status ret = graph_prepare.UpdateInputOutputByOptions();
EXPECT_EQ(ret, SUCCESS);
}
}

+ 74
- 2
tests/ut/ge/hybrid/ge_hybrid_unittest.cc View File

@@ -19,10 +19,12 @@
#include <vector>
#include "runtime/rt.h"

#include "graph/utils/node_utils.h"
#define protected public
#define private public
#include "hybrid/model/hybrid_model_builder.h"
#include "hybrid/model/hybrid_model.h"
#include "hybrid/node_executor/node_executor.h"
#include "model/ge_model.h"
#include "model/ge_root_model.h"
#include "hybrid/node_executor/aicore/aicore_op_task.h"
@@ -51,7 +53,9 @@ class UtestGeHybrid : public testing::Test {
protected:
void SetUp() {}

void TearDown() {}
void TearDown() {
NpuMemoryAllocator::allocators_.clear();
}
};

static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") {
@@ -245,7 +249,7 @@ TEST_F(UtestGeHybrid, init_weight_success) {
ASSERT_EQ(ret,PARAM_INVALID);
}

TEST_F(UtestGeHybrid, hybrid_model_executor) {
TEST_F(UtestGeHybrid, hybrid_model_executor) {
ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("abc");
GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
HybridModel model(root_model);
@@ -256,3 +260,71 @@ TEST_F(UtestGeHybrid, init_weight_success) {
HybridModelExecutor executor(model_ptr, device_id, stream);
executor.Init();
}

TEST_F(UtestGeHybrid, test_parse_parallel_group) {
NodeExecutorManager::GetInstance().engine_mapping_.emplace("ops_kernel_info_hccl",
NodeExecutorManager::ExecutorType::HCCL);
ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test");
OpDescPtr op_desc = CreateOpDesc("AllReduce", "AllReduce");
op_desc->SetId(0);
ge::AttrUtils::SetStr(op_desc, ATTR_NAME_PARALLEL_GROUP, "group_1");
auto node = compute_graph->AddNode(op_desc);
std::unique_ptr<NodeItem> node_item;
NodeItem::Create(node, node_item);
node_item->node_id = 0;

op_desc->SetOpKernelLibName("ops_kernel_info_hccl");
GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
HybridModel model(root_model);

HybridModelBuilder builder(model);
builder.root_graph_ = compute_graph;
ASSERT_EQ(builder.CollectParallelGroups(node_item.get()), SUCCESS);

ASSERT_EQ(builder.node_to_parallel_groups_.size(), 1);
ASSERT_EQ(builder.parallel_group_to_nodes_.size(), 1);

OpDescPtr op_desc_1 = CreateOpDesc("subgraph", "PartitionedCall");
op_desc_1->AddSubgraphName("subgraph");
auto node_1 = compute_graph->AddNode(op_desc_1);

ComputeGraphPtr subgraph = MakeShared<ComputeGraph>("subgraph");
ASSERT_EQ(NodeUtils::SetSubgraph(*node_1, 0, subgraph), GRAPH_SUCCESS);

std::unique_ptr<NodeItem> node_item_1;
NodeItem::Create(node_1, node_item_1);
node_item_1->node_id = 1;

ASSERT_EQ(builder.CollectParallelGroups(node_item_1.get()), SUCCESS);
ASSERT_EQ(builder.node_to_parallel_groups_.size(), 1);
ASSERT_EQ(builder.parallel_group_to_nodes_.size(), 1);

OpDescPtr op_desc_2 = CreateOpDesc("sub_node_1", "AllReduce");
ge::AttrUtils::SetStr(op_desc_2, ATTR_NAME_PARALLEL_GROUP, "group_1");
auto node_2 = subgraph->AddNode(op_desc_2);
ASSERT_TRUE(node_2 != nullptr);

OpDescPtr op_desc_3 = CreateOpDesc("sub_node_2", "AllReduce2");
ge::AttrUtils::SetStr(op_desc_3, ATTR_NAME_PARALLEL_GROUP, "group_2");
auto node_3 = subgraph->AddNode(op_desc_3);
ASSERT_TRUE(node_3 != nullptr);

ASSERT_EQ(builder.CollectParallelGroups(node_item_1.get()), SUCCESS);
ASSERT_EQ(builder.node_to_parallel_groups_.size(), 2);
ASSERT_EQ(builder.parallel_group_to_nodes_.size(), 2);
ASSERT_EQ(builder.parallel_group_to_nodes_["group_1"].size(), 2);
ASSERT_EQ(builder.parallel_group_to_nodes_["group_2"].size(), 1);

ASSERT_FALSE(node_item->has_observer);
ASSERT_TRUE(node_item_1->dependents_for_execution.empty());
ASSERT_EQ(builder.ParseDependentByParallelGroup(), SUCCESS);
ASSERT_TRUE(node_item->has_observer);
ASSERT_EQ(node_item_1->dependents_for_execution.size(), 1);
ASSERT_EQ(node_item_1->dependents_for_execution[0], node);

// repeat parse
ASSERT_EQ(builder.ParseDependentByParallelGroup(), SUCCESS);
ASSERT_TRUE(node_item->has_observer);
ASSERT_EQ(node_item_1->dependents_for_execution.size(), 1);
ASSERT_EQ(node_item_1->dependents_for_execution[0], node);
}

+ 62
- 0
tests/ut/ge/hybrid/known_node_executor_unittest.cc View File

@@ -0,0 +1,62 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <vector>
#include <memory>

#define protected public
#define private public
#include "hybrid/node_executor/compiledsubgraph/known_node_executor.h"
#undef private
#undef protected
#include "graph/manager/graph_mem_allocator.h"

using namespace std;
using namespace testing;
using namespace ge;
using namespace hybrid;

class UnknownNodeExecutorTest : public testing::Test {
protected:
void SetUp() {}
void TearDown() {}
};

namespace {
class KnownNodeTaskMock : public KnownNodeTask {
public:
KnownNodeTaskMock(std::shared_ptr<DavinciModel> davinci_model): KnownNodeTask(davinci_model) {};
~KnownNodeTaskMock() override = default;
MOCK_METHOD0(DoInitDavinciModel, Status());
};
}

TEST_F(UnknownNodeExecutorTest, test_init_davinci_model) {
auto davinci_model = std::make_shared<DavinciModel>(0, nullptr);
davinci_model->SetDeviceId(0);
davinci_model->SetKnownNode(true);

auto ge_model = make_shared<GeModel>();
AttrUtils::SetInt(ge_model, ATTR_MODEL_VAR_SIZE, 0);
AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, 1024);
davinci_model->Assign(ge_model);

KnownNodeTaskMock mock(davinci_model);
EXPECT_CALL(mock, DoInitDavinciModel).WillOnce(::testing::Return(SUCCESS));
ASSERT_EQ(mock.InitDavinciModel(), SUCCESS);
}

+ 21
- 9
tests/ut/ge/profiling/ge_profiling_manager_unittest.cc View File

@@ -37,6 +37,10 @@ class UtestGeProfilinganager : public testing::Test {
void TearDown() override {}
};

int32_t ReporterCallback(uint32_t moduleId, uint32_t type, void *data, uint32_t len) {
return -1;
}

TEST_F(UtestGeProfilinganager, init_success) {
setenv("PROFILING_MODE", "true", true);
Options options;
@@ -53,16 +57,24 @@ TEST_F(UtestGeProfilinganager, init_success) {
}

TEST_F(UtestGeProfilinganager, ParseOptions) {
setenv("PROFILING_MODE", "true", true);
Options options;
options.device_id = 0;
options.job_id = "0";
options.profiling_mode = "1";
options.profiling_options = R"({"result_path":"/data/profiling","training_trace":"on","task_trace":"on","aicpu_trace":"on","fp_point":"Data_0","bp_point":"addn","ai_core_metrics":"ResourceConflictRatio"})";
setenv("PROFILING_MODE", "true", true);
Options options;
options.device_id = 0;
options.job_id = "0";
options.profiling_mode = "1";
options.profiling_options = R"({"result_path":"/data/profiling","training_trace":"on","task_trace":"on","aicpu_trace":"on","fp_point":"Data_0","bp_point":"addn","ai_core_metrics":"ResourceConflictRatio"})";


struct MsprofGeOptions prof_conf = {{ 0 }};

Status ret = ProfilingManager::Instance().ParseOptions(options.profiling_options);
EXPECT_EQ(ret, ge::SUCCESS);
}

struct MsprofGeOptions prof_conf = {{ 0 }};
TEST_F(UtestGeProfilinganager, plungin_init_) {
ProfilingManager::Instance().prof_cb_.msprofReporterCallback = ReporterCallback;

Status ret = ProfilingManager::Instance().ParseOptions(options.profiling_options);
EXPECT_EQ(ret, ge::SUCCESS);
Status ret = ProfilingManager::Instance().PluginInit();
EXPECT_EQ(ret, INTERNAL_ERROR);
ProfilingManager::Instance().prof_cb_.msprofReporterCallback = nullptr;
}

+ 22
- 0
third_party/fwkacllib/inc/runtime/stream.h View File

@@ -189,6 +189,28 @@ RTS_API rtError_t rtStreamActive(rtStream_t activeStream, rtStream_t stream);
*/
RTS_API rtError_t rtStreamSwitchN(void *ptr, uint32_t size, void *valuePtr, rtStream_t *trueStreamPtr,
uint32_t elementSize, rtStream_t stream, rtSwitchDataType_t dataType);

/*
* @ingroup dvrt_stream
* @brief enable debug for dump overflow exception with stream
* @param [in] addr: ddr address of kernel exception dumpped
* @param [in] stream: stream handle
* @param [in] flag: debug flag
* @return RT_ERROR_NONE for ok
* @return RT_ERROR_INVALID_VALUE for error input
*/
RTS_API rtError_t rtDebugRegisterForStream(rtStream_t stream, uint32_t flag, const void *addr,
uint32_t *streamId, uint32_t *taskId);

/*
* @ingroup rt_model
* @brief disable debug for dump overflow exception with stream
* @param [in] stream: stream handle
* @return RT_ERROR_NONE for ok
* @return RT_ERROR_INVALID_VALUE for error input
*/
RTS_API rtError_t rtDebugUnRegisterForStream(rtStream_t stream);

#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
}
#endif
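
The two stream-level debug calls are naturally paired around the working lifetime of a stream. A minimal sketch based only on the signatures above; the dump address, flag value, and error handling are placeholder assumptions:

// Hedged sketch: enable overflow-exception dump debugging on a stream, then disable it.
#include "runtime/rt.h"

rtError_t RunWithStreamDebug(rtStream_t stream, const void *dump_addr, uint32_t flag) {
  uint32_t stream_id = 0U;
  uint32_t task_id = 0U;
  rtError_t ret = rtDebugRegisterForStream(stream, flag, dump_addr, &stream_id, &task_id);
  if (ret != RT_ERROR_NONE) {
    return ret;
  }
  // ... launch kernels on the stream; overflow exceptions are dumped to dump_addr ...
  return rtDebugUnRegisterForStream(stream);
}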


+ 1
- 0
third_party/fwkacllib/inc/toolchain/prof_callback.h View File

@@ -74,6 +74,7 @@ enum MsprofReporterCallbackType {
MSPROF_REPORTER_REPORT = 0, // report data
MSPROF_REPORTER_INIT, // init reporter
MSPROF_REPORTER_UNINIT, // uninit reporter
MSPROF_REPORTER_DATA_MAX_LEN, // data max length for calling report callback
};

/**

