
for error code

tags/v1.1.0
weiyang, 3 years ago
parent commit 28f911f5ca
20 changed files with 422 additions and 319 deletions
  1. ge/common/helper/model_helper.cc (+13, -13)
  2. ge/common/helper/om_file_helper.cc (+8, -8)
  3. ge/common/model_parser/base.cc (+12, -12)
  4. ge/executor/ge_executor.cc (+139, -137)
  5. ge/graph/execute/graph_execute.cc (+1, -1)
  6. ge/graph/load/graph_loader.cc (+2, -2)
  7. ge/graph/load/new_model_manager/davinci_model.cc (+16, -15)
  8. ge/graph/load/new_model_manager/model_manager.cc (+25, -22)
  9. ge/single_op/single_op.cc (+22, -22)
  10. ge/single_op/single_op_manager.cc (+8, -8)
  11. ge/single_op/single_op_model.cc (+29, -27)
  12. ge/single_op/stream_resource.cc (+2, -2)
  13. ge/single_op/task/aicpu_kernel_task_builder.cc (+6, -6)
  14. ge/single_op/task/aicpu_task_builder.cc (+16, -16)
  15. ge/single_op/task/op_task.cc (+2, -2)
  16. ge/single_op/task/tbe_task_builder.cc (+24, -24)
  17. inc/external/ge/ge_api_error_codes.h (+37, -0)
  18. inc/external/ge/ge_error_codes.h (+58, -0)
  19. metadef (+1, -1)
  20. parser (+1, -1)

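The change is largely mechanical across these files: call sites that logged and returned internal GE status codes (PARAM_INVALID, FAILED, MEMALLOC_FAILED, GE_EXEC_*) now log and return the corresponding externally visible ACL_ERROR_GE_* code, declared in the two new headers, so the ACL caller receives a precise error instead of a collapsed generic one. A minimal before/after sketch of the recurring pattern follows; the enclosing functions are hypothetical, while the GELOGE macro and the error-code names are the ones that appear in the diff below.

// Before: internal GE status code is logged and returned.
Status LoadModelOld(const ge::ModelData &model_data) {
  if (model_data.model_data == nullptr || model_data.model_len == 0) {
    GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, "Model_data is nullptr, or model_data_size is 0");
    return GE_EXEC_MODEL_DATA_SIZE_INVALID;
  }
  return SUCCESS;
}

// After: the ACL-facing error code is both logged and returned to the caller.
Status LoadModelNew(const ge::ModelData &model_data) {
  if (model_data.model_data == nullptr || model_data.model_len == 0) {
    GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "Model_data is nullptr, or model_data_size is 0");
    return ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID;
  }
  return SUCCESS;
}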
ge/common/helper/model_helper.cc (+13, -13)

@@ -268,18 +268,18 @@ ModelHelper::SaveOriginalGraphToOmModel(const ge::Graph &graph, const std::strin

FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadModel(const ge::ModelData &model_data) {
if (model_data.model_data == nullptr || model_data.model_len == 0) {
GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, "Model_data is nullptr, or model_data_size is 0");
return GE_EXEC_MODEL_DATA_SIZE_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "Model_data is nullptr, or model_data_size is 0");
return ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID;
}

if (is_assign_model_) {
GELOGE(GE_EXEC_LOAD_MODEL_REPEATED, "Model helper has already loaded!");
return GE_EXEC_LOAD_MODEL_REPEATED;
GELOGE(ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED, "Model helper has already loaded!");
return ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED;
}

if (ReleaseLocalModelData() != SUCCESS) {
GELOGE(INTERNAL_ERROR, "ReleaseLocalModelData failed.");
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA, "ReleaseLocalModelData failed.");
return ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA;
}

Status status = ge::DavinciModelParser::ParseModelContent(model_data, model_addr_tmp_, model_len_tmp_);
@@ -300,8 +300,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadModel(c
auto partition_table = reinterpret_cast<ModelPartitionTable *>(model_addr_tmp_);
if (partition_table->num == kOriginalOmPartitionNum) {
model_addr_tmp_ = nullptr;
GELOGE(GE_EXEC_MODEL_PARTITION_NUM_INVALID, "om model is error,please use executable om model");
return GE_EXEC_MODEL_PARTITION_NUM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID, "om model is error,please use executable om model");
return ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID;
}
// Encrypt model need to del temp model/no encrypt model don't need to del model
model_addr_tmp_ = nullptr;
@@ -321,23 +321,23 @@ Status ModelHelper::GenerateGeModel(OmFileLoadHelper &om_load_helper) {
GE_CHECK_NOTNULL(model_);
Status ret = LoadModelData(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_MODEL_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED;
}
ret = LoadWeights(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED;
}
ret = LoadTask(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_TASK_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED;
}
ret = LoadTBEKernelStore(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_KERNEL_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED;
}
ret = LoadCustAICPUKernelStore(om_load_helper);
if (ret != SUCCESS) {
return GE_EXEC_LOAD_KERNEL_PARTITION_FAILED;
return ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED;
}
return SUCCESS;
}


ge/common/helper/om_file_helper.cc (+8, -8)

@@ -110,8 +110,8 @@ Status OmFileLoadHelper::CheckModelValid(const ge::ModelData &model) const {

Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, const uint32_t model_data_size) {
if (model_data == nullptr) {
GELOGE(PARAM_INVALID, "Param model_data must not be null!");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID, "Param model_data must not be null!");
return ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID;
}
// Init partition table
auto partition_table = reinterpret_cast<ModelPartitionTable *>(model_data);
@@ -119,16 +119,16 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, const uint
// Original model partition include graph-info
if ((partition_table->num != PARTITION_SIZE) && (partition_table->num != (PARTITION_SIZE - 1)) &&
(partition_table->num != (PARTITION_SIZE - kOptionalNum)) && (partition_table->num != 1)) {
GELOGE(GE_EXEC_MODEL_PARTITION_NUM_INVALID, "Invalid partition_table->num:%u", partition_table->num);
return GE_EXEC_MODEL_PARTITION_NUM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID, "Invalid partition_table->num:%u", partition_table->num);
return ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID;
}
size_t mem_offset = SIZE_OF_MODEL_PARTITION_TABLE(*partition_table);
GELOGI("ModelPartitionTable num :%u, ModelFileHeader length :%zu, ModelPartitionTable length :%zu",
partition_table->num, sizeof(ModelFileHeader), mem_offset);
if (model_data_size <= mem_offset) {
GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, "invalid model data, partition_table->num:%u, model data size %u",
GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "invalid model data, partition_table->num:%u, model data size %u",
partition_table->num, model_data_size);
return GE_EXEC_MODEL_DATA_SIZE_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID;
}
for (uint32_t i = 0; i < partition_table->num; i++) {
ModelPartition partition;
@@ -138,9 +138,9 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, const uint
context_.partition_datas_.push_back(partition);

if (partition.size > model_data_size || mem_offset > model_data_size - partition.size) {
GELOGE(GE_EXEC_MODEL_DATA_SIZE_INVALID, "The partition size %zu is greater than the model data size %u.",
GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "The partition size %zu is greater than the model data size %u.",
partition.size + mem_offset, model_data_size);
return GE_EXEC_MODEL_DATA_SIZE_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID;
}
mem_offset += partition.size;
GELOGI("Partition, type:%d, size:%u", static_cast<int>(partition.type), partition.size);


ge/common/model_parser/base.cc (+12, -12)

@@ -36,18 +36,18 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFro
std::string real_path = RealPath(model_path);
if (real_path.empty()) {
GELOGE(GE_EXEC_MODEL_PATH_INVALID, "Model file path '%s' is invalid", model_path);
return GE_EXEC_MODEL_PATH_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}

if (GetFileLength(model_path) == -1) {
GELOGE(GE_EXEC_READ_MODEL_FILE_FAILED, "File size not valid, file: %s.", model_path);
return GE_EXEC_READ_MODEL_FILE_FAILED;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "File size not valid, file: %s.", model_path);
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}

std::ifstream fs(real_path.c_str(), std::ifstream::binary);
if (!fs.is_open()) {
GELOGE(GE_EXEC_READ_MODEL_FILE_FAILED, "Open file: %s failed, error: %s", model_path, strerror(errno));
return GE_EXEC_READ_MODEL_FILE_FAILED;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "Open file: %s failed, error: %s", model_path, strerror(errno));
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}

// get length of file:
@@ -60,8 +60,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFro

char *data = new (std::nothrow) char[len];
if (data == nullptr) {
GELOGE(MEMALLOC_FAILED, "Load model From file failed, bad memory allocation occur. (need:%u)", len);
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Load model From file failed, bad memory allocation occur. (need:%u)", len);
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

// read data as a block:
@@ -84,7 +84,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseMo
GE_CHECK_NOTNULL(model.model_data);

// Model length too small
GE_CHK_BOOL_RET_STATUS(model.model_len >= sizeof(ModelFileHeader), GE_EXEC_MODEL_DATA_SIZE_INVALID,
GE_CHK_BOOL_RET_STATUS(model.model_len >= sizeof(ModelFileHeader), ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID,
"Invalid model. Model data size %u must be greater than or equal to %zu.", model.model_len,
sizeof(ModelFileHeader));
// Get file header
@@ -92,7 +92,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseMo
// Determine whether the file length and magic number match
GE_CHK_BOOL_RET_STATUS(
file_header->length == model.model_len - sizeof(ModelFileHeader) && file_header->magic == MODEL_FILE_MAGIC_NUM,
GE_EXEC_MODEL_DATA_SIZE_INVALID,
ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID,
"Invalid model. file_header->length[%u] + sizeof(ModelFileHeader)[%zu] != model->model_len[%u] || "
"MODEL_FILE_MAGIC_NUM[%u] != file_header->magic[%u]",
file_header->length, sizeof(ModelFileHeader), model.model_len, MODEL_FILE_MAGIC_NUM, file_header->magic);
@@ -102,15 +102,15 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseMo
// Get data address
uint8_t *data = reinterpret_cast<uint8_t *>(model.model_data) + sizeof(ModelFileHeader);
if (file_header->is_encrypt == ModelEncryptType::UNENCRYPTED) { // Unencrypted model
GE_CHK_BOOL_RET_STATUS(model.key.empty(), GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION,
GE_CHK_BOOL_RET_STATUS(model.key.empty(), ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION,
"Invalid param. model is unencrypted, but key is not empty.");

model_data = data;
model_len = file_header->length;
GELOGI("Model_len is %u, model_file_head_len is %zu.", model_len, sizeof(ModelFileHeader));
} else {
GELOGE(GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION, "Invalid model. ModelEncryptType not supported.");
res = GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION, "Invalid model. ModelEncryptType not supported.");
res = ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION;
}

return res;


ge/executor/ge_executor.cc (+139, -137)

@@ -50,19 +50,6 @@ const size_t kDynamicImageSizeVecSize = 2;
const size_t kDynamicImageSizeInputSize = 2;
const char *const kBatchLabel = "Batch_";

ge::Status TransferDomiErrorCode(const uint32_t errorCode) {
switch (errorCode) {
case ge::PARAM_INVALID:
case domi::PARAM_INVALID:
return ge::PARAM_INVALID;
case ge::INTERNAL_ERROR:
case domi::INTERNAL_ERROR:
return ge::INTERNAL_ERROR;
default:
return ge::FAILED;
}
}

void GetGeTensorDescFromDomiInfo(std::vector<ge::TensorDesc> &ge_descs,
const std::vector<ge::InputOutputDescInfo> &domi_descs,
const std::vector<uint32_t> &formats) {
@@ -286,14 +273,14 @@ Status GeExecutor::Finalize() {
Status GeExecutor::SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length,
uint64_t batch_size) {
if (dynamic_input_addr == nullptr) {
GELOGE(PARAM_INVALID, "Dynamic input addr is nullptr!");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic input addr is nullptr!");
return ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID;
}

uint64_t size = sizeof(uint32_t);
if (length < size) {
GELOGE(PARAM_INVALID, "Dynamic input size [%lu] is less than [%lu]!", length, size);
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, "Dynamic input size [%lu] is less than [%lu]!", length, size);
return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID;
}
if (length >= sizeof(uint64_t)) {
size = sizeof(uint64_t);
@@ -310,8 +297,8 @@ Status GeExecutor::SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_ad
}

if (!IsDynamicBatchSizeMatchModel(batch_size, batch_info)) {
GELOGE(PARAM_INVALID, "The current dynamic input does not match the gear of the model.");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, "The current dynamic input does not match the gear of the model.");
return ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID;
}

ret = GraphExecutor::SetDynamicSize(model_id, batch_num, static_cast<int32_t>(DYNAMIC_BATCH));
@@ -322,7 +309,7 @@ Status GeExecutor::SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_ad
// memcpy dynamic_batch_size from host to device
rtError_t rt_ret = rtMemcpy(dynamic_input_addr, length, &batch_size, size, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "memcpy dynamic batch input data failed! ret: 0x%X", rt_ret);
GELOGE(rt_ret, "memcpy dynamic batch input data failed! ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
return SUCCESS;
@@ -331,14 +318,15 @@ Status GeExecutor::SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_ad
Status GeExecutor::SetDynamicImageSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length,
uint64_t image_height, uint64_t image_width) {
if (dynamic_input_addr == nullptr) {
GELOGE(PARAM_INVALID, "Dynamic input addr is nullptr!");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic input addr is nullptr!");
return ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID;
}

uint64_t dynamic_input_size = kDynamicImageSizeInputSize * sizeof(uint32_t);
if (length < dynamic_input_size) {
GELOGE(PARAM_INVALID, "Dynamic input size [%lu] is less than [%lu]!", length, dynamic_input_size);
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID,
"Dynamic input size [%lu] is less than [%lu]!", length, dynamic_input_size);
return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID;
}
uint64_t size = sizeof(uint32_t);
if (length >= kDynamicImageSizeInputSize * sizeof(uint64_t)) {
@@ -355,8 +343,8 @@ Status GeExecutor::SetDynamicImageSize(uint32_t model_id, void *dynamic_input_ad
}

if (!IsDynamicImageSizeMatchModel(image_height, image_width, batch_info)) {
GELOGE(PARAM_INVALID, "The current dynamic input does not match the gear of the model.");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, "The current dynamic input does not match the gear of the model.");
return ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID;
}

ret = GraphExecutor::SetDynamicSize(model_id, batch_num, static_cast<int32_t>(DYNAMIC_IMAGE));
@@ -365,20 +353,21 @@ Status GeExecutor::SetDynamicImageSize(uint32_t model_id, void *dynamic_input_ad
return ret;
}

// Memcpy dynamic resolution height from host to device
// Memcpy dynamic resolution height from host to device
rtError_t rt_ret =
rtMemcpy(dynamic_input_addr, size, &image_height, size, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "memcpy dynamic resolution input data failed! ret: 0x%X", rt_ret);
GELOGE(rt_ret, "memcpy dynamic resolution input data failed! ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}

uint64_t remain_size = length - size;
// Memcpy dynamic resolution width from host to device
if (rtMemcpy(reinterpret_cast<void *>(reinterpret_cast<uint8_t *>(dynamic_input_addr) + size),
remain_size, &image_width, size, RT_MEMCPY_HOST_TO_DEVICE) != RT_ERROR_NONE) {
GELOGE(FAILED, "memcpy dynamic resolution input data failed!");
return FAILED;
rt_ret = rtMemcpy(reinterpret_cast<void *>(reinterpret_cast<uint8_t *>(dynamic_input_addr) + size),
remain_size, &image_width, size, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(rt_ret, "memcpy dynamic resolution input data failed!");
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
return SUCCESS;
}
@@ -386,15 +375,15 @@ Status GeExecutor::SetDynamicImageSize(uint32_t model_id, void *dynamic_input_ad
Status GeExecutor::SetDynamicDims(uint32_t model_id, void *dynamic_input_addr, uint64_t length,
const vector<uint64_t> &dynamic_dims) {
if (dynamic_input_addr == nullptr) {
GELOGE(FAILED, "Dynamic input addr is nullptr!");
return FAILED;
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic input addr is nullptr!");
return ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID;
}

vector<uint64_t> cur_dynamic_dims;
Status ret = GetCurDynamicDims(model_id, dynamic_dims, cur_dynamic_dims);
if (ret != SUCCESS) {
GELOGE(FAILED, "Set cur gear dynamic dims failed");
return FAILED;
GELOGE(ret, "Set cur gear dynamic dims failed");
return ret;
}
std::vector<std::vector<int64_t>> batch_info;
int32_t dynamic_type = static_cast<int32_t>(FIXED);
@@ -405,32 +394,35 @@ Status GeExecutor::SetDynamicDims(uint32_t model_id, void *dynamic_input_addr, u
}

if (!IsDynmaicDimsSizeMatchModel(cur_dynamic_dims, batch_info)) {
GELOGE(PARAM_INVALID, "The current dynamic input does not match the gear of the model.");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, "The current dynamic input does not match the gear of the model.");
return ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID;
}

ret = GraphExecutor::SetDynamicSize(model_id, cur_dynamic_dims, static_cast<int32_t>(DYNAMIC_DIMS));
if (ret != SUCCESS) {
GELOGE(FAILED, "Set dynamic size failed");
return FAILED;
GELOGE(ret, "Set dynamic size failed");
return ret;
}

size_t dynamic_dim_num = cur_dynamic_dims.size();
uint64_t dynamic_input_size = static_cast<uint64_t>(dynamic_dim_num * sizeof(uint32_t));
if (length < dynamic_input_size) {
GELOGE(FAILED, "Dynamic input size [%lu] is less than [%lu]!", length, dynamic_input_size);
return FAILED;
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID,
"Dynamic input size [%lu] is less than [%lu]!", length, dynamic_input_size);
return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID;
}
uint64_t size = sizeof(uint32_t);
if (length >= dynamic_dim_num * sizeof(uint64_t)) {
size = sizeof(uint64_t);
}
rtError_t rt_ret;
for (uint32_t i = 0; i < dynamic_dim_num; ++i) {
// Memcpy dynamic dim[i] from host to device
if (rtMemcpy(reinterpret_cast<void *>(reinterpret_cast<uint8_t *>(dynamic_input_addr) + size * i),
length - size * i, &cur_dynamic_dims[i], size, RT_MEMCPY_HOST_TO_DEVICE) != RT_ERROR_NONE) {
GELOGE(FAILED, "memcpy dynamic resolution input data failed!");
return FAILED;
rt_ret = rtMemcpy(reinterpret_cast<void *>(reinterpret_cast<uint8_t *>(dynamic_input_addr) + size * i),
length - size * i, &cur_dynamic_dims[i], size, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(rt_ret, "memcpy dynamic resolution input data failed!");
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
}
return SUCCESS;
@@ -443,15 +435,15 @@ Status GeExecutor::GetCurDynamicDims(uint32_t model_id, const vector<uint64_t> &
vector<ge::TensorDesc> output_desc;
auto ret = GetModelDescInfo(model_id, input_desc, output_desc);
if (ret != ge::SUCCESS) {
GELOGE(FAILED, "GetModelDescInfo failed.");
return FAILED;
GELOGE(ret, "GetModelDescInfo failed.");
return ret;
}
vector<string> user_designate_shape_order;
vector<int64_t> all_data_dims;
ret = GetUserDesignateShapeOrder(model_id, user_designate_shape_order);
if (ret != ge::SUCCESS) {
GELOGE(FAILED, "GetUserDesignateShapeOrder failed.");
return FAILED;
GELOGE(ret, "GetUserDesignateShapeOrder failed.");
return ret;
}
for (auto &data_name : user_designate_shape_order) {
for (auto &desc : input_desc) {
@@ -464,17 +456,18 @@ Status GeExecutor::GetCurDynamicDims(uint32_t model_id, const vector<uint64_t> &
}
}
if (dynamic_dims.size() != all_data_dims.size()){
GELOGE(FAILED, "Dynamic input size [%lu] is not equal with all data dims size [%lu]!",
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID,
"Dynamic input size [%lu] is not equal with all data dims size [%lu]!",
dynamic_dims.size(), all_data_dims.size());
return FAILED;
return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID;
}
for (std::size_t i = 0; i < all_data_dims.size(); ++i) {
if (all_data_dims[i] < 0) {
cur_dynamic_dims.push_back(dynamic_dims[i]);
} else if (static_cast<uint64_t>(all_data_dims[i]) != dynamic_dims[i]) {
GELOGE(PARAM_INVALID, "Static dims should be same, index: %zu value: %d should be %d",
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, "Static dims should be same, index: %zu value: %d should be %d",
i, dynamic_dims[i], all_data_dims[i]);
return PARAM_INVALID;
return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID;
}
}
return SUCCESS;
@@ -483,8 +476,8 @@ Status GeExecutor::GetCurDynamicDims(uint32_t model_id, const vector<uint64_t> &
Status GeExecutor::GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info, int32_t &dynamic_type) {
GELOGI("Begin to get current shape");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}
Status ret = GraphExecutor::GetCurShape(model_id, batch_info, dynamic_type);
if (ret != SUCCESS) {
@@ -499,12 +492,12 @@ Status GeExecutor::SetDynamicAippData(uint32_t model_id, void *dynamic_input_add
const kAippDynamicPara &aippParms) {
GELOGI("Enter to SetDynamicAippData.");
if (dynamic_input_addr == nullptr) {
GELOGE(PARAM_INVALID, "Dynamic aipp input addr is nullptr!");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic aipp input addr is nullptr!");
return ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID;
}
if (aippBatchPara.empty()) {
GELOGE(PARAM_INVALID, "aippBatchPara is empty.");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_AIPP_BATCH_EMPTY, "aippBatchPara is empty.");
return ACL_ERROR_GE_AIPP_BATCH_EMPTY;
}
uint64_t batch_num = aippBatchPara.size();
uint64_t real_aippParms_size = sizeof(kAippDynamicPara) - sizeof(kAippDynamicBatchPara);
@@ -514,13 +507,14 @@ Status GeExecutor::SetDynamicAippData(uint32_t model_id, void *dynamic_input_add
"batch num is %lu, struct_len is %lu",
model_id, length, batch_num, struct_len);
if (struct_len > length) {
GELOGE(PARAM_INVALID, "input dynamic aipp param len [%lu] is larger than aipp_data size [%lu]", struct_len, length);
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID,
"input dynamic aipp param len [%lu] is larger than aipp_data size [%lu]", struct_len, length);
return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID;
}
// Memcpy real kAippDynamicBatchPara from host to device
rtError_t rt_ret = rtMemcpy(dynamic_input_addr, length, &aippParms, real_aippParms_size, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "memcpy real_aippParms_size failed! ret: 0x%X", rt_ret);
GELOGE(rt_ret, "memcpy real_aippParms_size failed! ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
uint64_t remain_len = length - real_aippParms_size;
@@ -531,7 +525,7 @@ Status GeExecutor::SetDynamicAippData(uint32_t model_id, void *dynamic_input_add
(remain_len - i * sizeof(kAippDynamicBatchPara)), &(aippBatchPara[i]),
sizeof(kAippDynamicBatchPara), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "memcpy kAippDynamicBatchPara input data failed! ret: 0x%X", rt_ret);
GELOGE(rt_ret, "memcpy kAippDynamicBatchPara input data failed! ret: 0x%X", rt_ret);
return RT_ERROR_TO_GE_STATUS(rt_ret);
}
}
@@ -543,27 +537,28 @@ Status GeExecutor::LoadModelOffline(uint32_t &model_id, const std::string &path,
int32_t priority, std::shared_ptr<ge::ModelListener> listener) {
GELOGI("load model offline begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

string filePath = RealPath(path.c_str());
if (filePath.empty()) {
GELOGE(ge::FAILED, "File path is invalid. please check your text file '%s'.", path.c_str());
return ge::FAILED;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID,
"File path is invalid. please check your text file '%s'.", path.c_str());
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}

std::shared_ptr<ModelListenerAdapter> listener_adapter = MakeShared<ModelListenerAdapter>();
if (listener_adapter == nullptr) {
GELOGE(MEMALLOC_FAILED, "ModelListenerAdapter make shared failed!");
return ge::FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "ModelListenerAdapter make shared failed!");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
listener_adapter->listener = listener;

Status ret = GraphLoader::LoadModelFromFile(path, key, priority, listener_adapter, model_id);
if (ret != SUCCESS) {
GELOGE(ret, "[GeExecutor] LoadModelFromFile failed");
return TransferDomiErrorCode(ret);
return ACL_ERROR_GE_LOAD_MODEL;
}
return SUCCESS;
}
@@ -572,21 +567,21 @@ Status GeExecutor::LoadModel(uint32_t &model_id, const ModelData &model_data,
std::shared_ptr<ge::ModelListener> listener) {
GELOGI("Load model begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

std::shared_ptr<ModelListenerAdapter> listener_adapter = MakeShared<ModelListenerAdapter>();
if (listener_adapter == nullptr) {
GELOGE(MEMALLOC_FAILED, "ModelListenerAdapter make shared failed!");
return ge::FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "ModelListenerAdapter make shared failed!");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
listener_adapter->listener = listener;

Status ret = GraphLoader::LoadModel(model_data, listener_adapter, model_id);
if (ret != SUCCESS) {
GELOGE(ret, "[GeExecutor] LoadModel failed.");
return TransferDomiErrorCode(ret);
return ACL_ERROR_GE_LOAD_MODEL;
}
return ret;
}
@@ -594,13 +589,13 @@ Status GeExecutor::LoadModel(uint32_t &model_id, const ModelData &model_data,
Status GeExecutor::UnloadModel(uint32_t model_id) {
GELOGI("unload model %u begin.", model_id);
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}
Status ret = GraphLoader::DestroyAicpuSessionForInfer(model_id);
if (ret != SUCCESS) {
GELOGE(ret, "[GraphLoader] DestroyAicpuSessionForInfer failed. model id: %u", model_id);
return FAILED;
return ACL_ERROR_GE_INTERNAL_ERROR;
}

std::shared_ptr<DavinciModel> davinci_model = ModelManager::GetInstance()->GetModel(model_id);
@@ -608,14 +603,19 @@ Status GeExecutor::UnloadModel(uint32_t model_id) {
uint64_t session_id = davinci_model->GetSessionId();
VarManagerPool::Instance().RemoveVarManager(session_id);
}
return GraphLoader::UnloadModel(model_id);
ret = GraphLoader::UnloadModel(model_id);
if (ret != SUCCESS) {
GELOGE(ret, "[GraphLoader] DestroyAicpuSessionForInfer failed. model id: %u", model_id);
return ACL_ERROR_GE_UNLOAD_MODEL;
}
return SUCCESS;
}

Status GeExecutor::RunModel(const ge::RunModelData &input_data, ge::RunModelData &output_data) {
GELOGI("run model begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

InputData inputs;
@@ -631,8 +631,8 @@ Status GeExecutor::GetModelDescInfo(uint32_t model_id, std::vector<ge::TensorDes
std::vector<ge::TensorDesc> &output_desc, bool new_model_desc) {
GELOGI("get model desc info begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

std::vector<InputOutputDescInfo> input_desc_infos;
@@ -644,19 +644,20 @@ Status GeExecutor::GetModelDescInfo(uint32_t model_id, std::vector<ge::TensorDes
output_formats, new_model_desc);
if (ret != domi::SUCCESS) {
GELOGE(ret, "GetInputOutputDescInfo failed. ret = %u", ret);
return ret;
return ACL_ERROR_GE_GET_TENSOR_INFO;
}

if (input_formats.size() != input_desc_infos.size()) {
GELOGE(ge::PARAM_INVALID, "input_formats size %zu is not equal to input_desc_infos size %zu.", input_formats.size(),
input_desc_infos.size());
return ge::PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"input_formats size %zu is not equal to input_desc_infos size %zu.",
input_formats.size(), input_desc_infos.size());
return ACL_ERROR_GE_PARAM_INVALID;
}

if (output_formats.size() != output_desc_infos.size()) {
GELOGE(ge::PARAM_INVALID, "output_formats size %zu is not equal to output_desc_infos size %zu.",
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "output_formats size %zu is not equal to output_desc_infos size %zu.",
output_formats.size(), output_desc_infos.size());
return ge::PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}

// Transfer data to TensorDesc
@@ -679,8 +680,8 @@ Status GeExecutor::GetDynamicBatchInfo(uint32_t model_id, std::vector<std::vecto
int32_t &dynamic_type) {
GELOGI("Begin to get dynamic batch info.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

Status ret = GraphExecutor::GetDynamicBatchInfo(model_id, batch_info, dynamic_type);
@@ -703,8 +704,8 @@ Status GeExecutor::GetDynamicBatchInfo(uint32_t model_id, std::vector<std::vecto
Status GeExecutor::GetCombinedDynamicDims(uint32_t model_id, vector<vector<int64_t>> &batch_info) {
GELOGI("Begin to get combined dynamic dims info.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

Status ret = GraphExecutor::GetCombinedDynamicDims(model_id, batch_info);
@@ -727,8 +728,8 @@ Status GeExecutor::GetCombinedDynamicDims(uint32_t model_id, vector<vector<int64
Status GeExecutor::GetUserDesignateShapeOrder(uint32_t model_id, vector<string> &user_designate_shape_order) {
GELOGI("Begin to get user designate shape info.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

Status ret = GraphExecutor::GetUserDesignateShapeOrder(model_id, user_designate_shape_order);
@@ -752,8 +753,8 @@ Status GeExecutor::GetUserDesignateShapeOrder(uint32_t model_id, vector<string>
Status GeExecutor::GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info) {
GELOGI("Begin to GetAIPPInfo.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "not inited yet!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}
Status ret = GraphExecutor::GetAIPPInfo(model_id, index, aipp_info);
if (ret != SUCCESS) {
@@ -767,8 +768,8 @@ Status GeExecutor::GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo
Status GeExecutor::GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index) {
GELOGI("Begin to get aipp type.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "not inited yet!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}
Status ret = GraphExecutor::GetAippType(model_id, index, type, aipp_index);
if (ret != SUCCESS) {
@@ -782,8 +783,8 @@ Status GeExecutor::GetAippType(uint32_t model_id, uint32_t index, InputAippType
Status GeExecutor::GetModelAttr(uint32_t model_id, std::vector<std::string> &dynamic_output_shape_info) {
GELOGI("Begin to get dynamic batch output shape info");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "not inited yet!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}
Status ret = GraphExecutor::GetModelAttr(model_id, dynamic_output_shape_info);
if (ret != SUCCESS) {
@@ -799,8 +800,8 @@ Status GeExecutor::GetModelDescInfoForZeroCopy(uint32_t model_id, std::vector<ge
std::vector<TensorDesc> &output_desc) {
GELOGI("get model desc info for zero copy begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

std::vector<InputOutputDescInfo> input_desc_infos;
@@ -812,17 +813,17 @@ Status GeExecutor::GetModelDescInfoForZeroCopy(uint32_t model_id, std::vector<ge
input_formats, output_formats);
if (ret != domi::SUCCESS) {
GELOGE(ret, "Get DescInfo from zero copy failed. ret = %u", ret);
return TransferDomiErrorCode(ret);
return ACL_ERROR_GE_GET_TENSOR_INFO;
}

if (input_formats.size() != input_desc_infos.size()) {
GELOGE(ge::FAILED, "input_formats.size() != input_desc_infos.size().");
return ge::FAILED;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "input_formats.size() != input_desc_infos.size().");
return ACL_ERROR_GE_PARAM_INVALID;
}

if (output_formats.size() != output_desc_infos.size()) {
GELOGE(ge::FAILED, "output_formats.size() != output_desc_infos.size().");
return ge::FAILED;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "output_formats.size() != output_desc_infos.size().");
return ACL_ERROR_GE_PARAM_INVALID;
}

GetGeTensorDescFromDomiInfo(input_desc, input_desc_infos, input_formats);
@@ -837,8 +838,8 @@ Status GeExecutor::CommandHandle(const Command &command) {

Status ret = GraphLoader::CommandHandle(command);
if (ret != SUCCESS) {
GELOGE(ret, "CommandHandle: Command Handle failed.");
return TransferDomiErrorCode(ret);
GELOGE(ACL_ERROR_GE_COMMAND_HANDLE, "CommandHandle: Command Handle failed.");
return ACL_ERROR_GE_COMMAND_HANDLE;
}
return SUCCESS;
}
@@ -846,8 +847,8 @@ Status GeExecutor::CommandHandle(const Command &command) {
Status GeExecutor::GetMaxUsedMemory(uint32_t model_id, uint32_t &max_size) {
GELOGI("Get max used memory begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

uint64_t max_mem_size = 0;
@@ -866,14 +867,15 @@ Status GeExecutor::GetMaxUsedMemory(uint32_t model_id, uint32_t &max_size) {
Status GeExecutor::LoadDataFromFile(const std::string &path, ModelData &model_data) {
GELOGI("Load data from file begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

string filePath = RealPath(path.c_str());
if (filePath.empty()) {
GELOGE(GE_EXEC_MODEL_PATH_INVALID, "File path is invalid. please check your text file '%s'.", path.c_str());
return GE_EXEC_MODEL_PATH_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID,
"File path is invalid. please check your text file '%s'.", path.c_str());
return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID;
}
GELOGI("load modelData from file: %s.", path.c_str());
std::string key_path;
@@ -903,8 +905,8 @@ Status GeExecutor::LoadModelFromData(uint32_t &model_id, const ModelData &model_
void *weight_ptr, size_t weight_size) {
GELOGI("Load model from data begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "not inited yet!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

return GraphLoader::LoadModelFromData(model_id, model_data, dev_ptr, mem_size, weight_ptr, weight_size);
@@ -924,8 +926,8 @@ Status GeExecutor::LoadModelWithQ(uint32_t &model_id, const ModelData &model_dat
const std::vector<uint32_t> &output_queue_ids) {
GELOGI("Load model with queue begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}
return GraphLoader::LoadModelWithQ(model_id, model_data, input_queue_ids, output_queue_ids);
}
@@ -944,8 +946,8 @@ Status GeExecutor::ExecModel(uint32_t model_id, void *stream, const ge::RunModel
ge::RunModelData &run_output_data, bool async_mode) {
GELOGI("Execute model begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

InputData input_data;
@@ -981,8 +983,8 @@ Status GeExecutor::ExecModel(uint32_t model_id, void *stream, const ge::RunModel
Status GeExecutor::GetMemAndWeightSize(const std::string &path, size_t &mem_size, size_t &weight_size) {
GELOGI("Get memory and weight size from file begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

ModelData model;
@@ -1014,13 +1016,13 @@ Status GeExecutor::GetMemAndWeightSize(const void *model_data, size_t model_size
size_t &weight_size) {
GELOGI("Get memory and weight size from data begin.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

if (model_data == nullptr) {
GELOGE(PARAM_INVALID, "invalid model data!");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID, "invalid model data!");
return ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID;
}

ModelData model;
@@ -1043,8 +1045,8 @@ Status GeExecutor::LoadDynamicSingleOp(const std::string &model_name, const ge::
Status GeExecutor::ExecuteAsync(SingleOp *executor, const std::vector<DataBuffer> &inputs,
std::vector<DataBuffer> &outputs) {
if (executor == nullptr) {
GELOGE(PARAM_INVALID, "param is NULL");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "param is NULL");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

return executor->ExecuteAsync(inputs, outputs);
@@ -1067,8 +1069,8 @@ Status GeExecutor::GetDeviceIdByModelId(uint32_t model_id, uint32_t &device_id)
GE_CHECK_NOTNULL(model_manager);
auto davinci_model = model_manager->GetModel(model_id);
if (davinci_model == nullptr) {
GELOGE(FAILED, "Model id: %d is invaild or model is not loaded.", model_id);
return FAILED;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID, "Model id: %d is invaild or model is not loaded.", model_id);
return ACL_ERROR_GE_EXEC_MODEL_ID_INVALID;
}

device_id = davinci_model->GetDeviceId();
@@ -1094,8 +1096,8 @@ Status GeExecutor::GetBatchInfoSize(uint32_t model_id, size_t &shape_count) {
Status GeExecutor::GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info) {
GELOGI("Begin to GetOrigInputInfo.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "not inited yet!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

Status ret = GraphExecutor::GetOrigInputInfo(model_id, index, orig_input_info);
@@ -1113,8 +1115,8 @@ Status GeExecutor::GetAllAippInputOutputDims(uint32_t model_id, uint32_t index,
std::vector<InputOutputDims> &output_dims) {
GELOGI("Begin to GetAllAippInputOutputDims.");
if (!isInit_) {
GELOGE(GE_EXEC_NOT_INIT, "not inited yet!");
return GE_EXEC_NOT_INIT;
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

Status ret = GraphExecutor::GetAllAippInputOutputDims(model_id, index, input_dims, output_dims);

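A second recurring fix in ge_executor.cc above concerns runtime-call failures: they used to log RT_FAILED or FAILED and return that generic value, whereas the new code logs the actual rtError_t and returns RT_ERROR_TO_GE_STATUS(rt_ret), so the runtime error code survives the conversion to a GE Status. A minimal sketch of that pattern, mirroring the rtMemcpy call sites in the diff (variable names match the diff; the enclosing function is hypothetical):

// Copy the selected dynamic batch size from host to device, propagating the runtime code on failure.
Status CopyDynamicBatchToDevice(void *dynamic_input_addr, uint64_t length, uint64_t batch_size, uint64_t size) {
  rtError_t rt_ret = rtMemcpy(dynamic_input_addr, length, &batch_size, size, RT_MEMCPY_HOST_TO_DEVICE);
  if (rt_ret != RT_ERROR_NONE) {
    // Log the concrete runtime code and map it to a GE Status instead of returning RT_FAILED.
    GELOGE(rt_ret, "memcpy dynamic batch input data failed! ret: 0x%X", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  return SUCCESS;
}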

ge/graph/execute/graph_execute.cc (+1, -1)

@@ -91,7 +91,7 @@ Status GraphExecutor::SetDynamicSize(uint32_t model_id, const std::vector<uint64
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->SetDynamicSize(model_id, batch_num, dynamic_type);
if (ret != SUCCESS) {
GELOGE(FAILED, "SetDynamicSize failed");
GELOGE(ret, "SetDynamicSize failed");
return ret;
}
return SUCCESS;


ge/graph/load/graph_loader.cc (+2, -2)

@@ -212,9 +212,9 @@ Status GraphLoader::CommandHandle(const Command &command) {
return ret;
}
} catch (std::bad_alloc &) {
GELOGE(MEMALLOC_FAILED, "Command handle failed, bad memory allocation occur !");
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Command handle failed, bad memory allocation occur !");

return MEMALLOC_FAILED;
return ACL_ERROR_GE_MEMORY_ALLOCATION;
} catch (...) {
GELOGE(FAILED, "Command handle failed, some exceptions occur !");



ge/graph/load/new_model_manager/davinci_model.cc (+16, -15)

@@ -1502,8 +1502,8 @@ Status DavinciModel::InitVariable(const OpDescPtr &op_desc) {
Status DavinciModel::SetQueIds(const std::vector<uint32_t> &input_queue_ids,
const std::vector<uint32_t> &output_queue_ids) {
if (input_queue_ids.empty() && output_queue_ids.empty()) {
GELOGE(GE_EXEC_MODEL_QUEUE_ID_INVALID, "Param is empty");
return GE_EXEC_MODEL_QUEUE_ID_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Param is empty");
return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
}

input_queue_ids_ = input_queue_ids;
@@ -1524,15 +1524,15 @@ Status DavinciModel::LoadWithQueue() {
}

if (input_queue_ids_.size() != new_input_data_info_.size()) {
GELOGE(GE_EXEC_MODEL_QUEUE_ID_INVALID, "Input queue ids not match model: input_queue=%zu input_data=%zu",
GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Input queue ids not match model: input_queue=%zu input_data=%zu",
input_queue_ids_.size(), new_input_data_info_.size());
return GE_EXEC_MODEL_QUEUE_ID_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
}

if (output_queue_ids_.size() != new_output_data_info_.size()) {
GELOGE(GE_EXEC_MODEL_QUEUE_ID_INVALID, "Output queue ids not match model: output_queue=%zu output_data=%zu",
GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Output queue ids not match model: output_queue=%zu output_data=%zu",
output_queue_ids_.size(), new_output_data_info_.size());
return GE_EXEC_MODEL_QUEUE_ID_INVALID;
return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
}

GE_CHK_STATUS_RET(AddHeadStream(), "Add head stream failed.");
@@ -1875,7 +1875,7 @@ Status DavinciModel::GetAIPPInfo(uint32_t index, AippConfigInfo &aipp_info) {
OpDescPtr data_op = data_op_list_[index];
if (!data_op->HasAttr(ATTR_NAME_AIPP)) {
GELOGW("GetAIPPInfo: there is not AIPP related with index %u.", index);
return GE_AIPP_NOT_EXIST;
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}

std::unique_ptr<domi::AippOpParams> aipp_params(new (std::nothrow) domi::AippOpParams());
@@ -1914,8 +1914,9 @@ Status DavinciModel::GetAippType(uint32_t index, InputAippType &type, size_t &ai
} else if (data_mode == "dynamic_aipp_conf") {
type = DYNAMIC_AIPP_NODE;
} else {
GELOGE(INTERNAL_ERROR, "The info of aipp releated info %s is invalid with index %u.", data_mode.c_str(), index);
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID,
"The info of aipp releated info %s is invalid with index %u.", data_mode.c_str(), index);
return ACL_ERROR_GE_AIPP_MODE_INVALID;
}

if (type == DATA_WITH_DYNAMIC_AIPP) {
@@ -1929,8 +1930,8 @@ Status DavinciModel::GetAippType(uint32_t index, InputAippType &type, size_t &ai
}
}
if (aipp_index == 0xFFFFFFFF) {
GELOGE(INTERNAL_ERROR, "Can not find aipp data node from index %u", index);
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "Can not find aipp data node from index %u", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}
}
return SUCCESS;
@@ -4107,8 +4108,8 @@ Status DavinciModel::GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_inpu
GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
OpDescPtr data_op = data_op_list_[index];
if (!data_op->HasAttr(ATTR_NAME_AIPP_INPUTS) || !data_op->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
GELOGE(GE_AIPP_NOT_EXIST, "GetOrigInputInfo: there is not AIPP related with index %u.", index);
return GE_AIPP_NOT_EXIST;
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "GetOrigInputInfo: there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}

vector<std::string> inputs;
@@ -4151,8 +4152,8 @@ Status DavinciModel::GetAllAippInputOutputDims(uint32_t index, std::vector<Input
GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
OpDescPtr data_op = data_op_list_[index];
if (!data_op->HasAttr(ATTR_NAME_AIPP_INPUTS) || !data_op->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
GELOGE(GE_AIPP_NOT_EXIST, "GetAllAippInputOutputDims: there is not AIPP related with index %u.", index);
return GE_AIPP_NOT_EXIST;
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "GetAllAippInputOutputDims: there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}

vector<std::string> inputs;


ge/graph/load/new_model_manager/model_manager.cc (+25, -22)

@@ -927,7 +927,7 @@ Status ModelManager::GetInputOutputDescInfo(const uint32_t model_id, vector<Inpu
Status ModelManager::GetDynamicBatchInfo(const uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info,
int32_t &dynamic_type) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, GE_EXEC_MODEL_ID_INVALID,
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetDynamicBatchInfo failed, Invalid model id %u!", model_id);

return davinci_model->GetDynamicBatchInfo(batch_info, dynamic_type);
@@ -942,8 +942,8 @@ Status ModelManager::GetDynamicBatchInfo(const uint32_t model_id, std::vector<st
///
Status ModelManager::GetCombinedDynamicDims(const uint32_t model_id, vector<vector<int64_t>> &batch_info) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID, "GetCombinedDynamicDims Failed, Invalid Model ID %u!",
model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetCombinedDynamicDims Failed, Invalid Model ID %u!", model_id);

davinci_model->GetCombinedDynamicDims(batch_info);
return SUCCESS;
@@ -959,7 +959,7 @@ Status ModelManager::GetCombinedDynamicDims(const uint32_t model_id, vector<vect
Status ModelManager::GetUserDesignateShapeOrder(const uint32_t model_id,
std::vector<std::string> &user_input_shape_order) {
auto davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID,
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetUserDesignateShapeOrder Failed, Invalid Model ID %u!", model_id)
davinci_model->GetUserDesignateShapeOrder(user_input_shape_order);
return SUCCESS;
@@ -1000,7 +1000,8 @@ Status ModelManager::GetInputOutputDescInfoForZeroCopy(const uint32_t model_id,
///
Status ModelManager::GetAIPPInfo(const uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID, "GetAIPPInfo failed, invalid model_id is %u.",
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetAIPPInfo failed, invalid model_id is %u.",
model_id);

return davinci_model->GetAIPPInfo(index, aipp_info);
@@ -1008,7 +1009,8 @@ Status ModelManager::GetAIPPInfo(const uint32_t model_id, uint32_t index, AippCo

Status ModelManager::GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID, "GetAIPPInfo failed, invalid model_id is %u.",
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetAIPPInfo failed, invalid model_id is %u.",
model_id);

return davinci_model->GetAippType(index, type, aipp_index);
@@ -1035,7 +1037,8 @@ Status ModelManager::GenSessionId(uint64_t &session_id) {

Status ModelManager::LoadModelOffline(uint32_t &model_id, const ModelData &model, shared_ptr<ModelListener> listener,
void *dev_ptr, size_t mem_size, void *weight_ptr, size_t weight_size) {
GE_CHK_BOOL_RET_STATUS(model.key.empty() || access(model.key.c_str(), F_OK) == 0, GE_EXEC_MODEL_KEY_PATH_INVALID,
GE_CHK_BOOL_RET_STATUS(model.key.empty() || access(model.key.c_str(), F_OK) == 0,
ACL_ERROR_GE_EXEC_MODEL_KEY_PATH_INVALID,
"input key file path %s is invalid, %s", model.key.c_str(), strerror(errno));
GenModelId(&model_id);

@@ -1054,8 +1057,8 @@ Status ModelManager::LoadModelOffline(uint32_t &model_id, const ModelData &model
try {
davinci_model = std::make_shared<DavinciModel>(model.priority, listener);
} catch (std::bad_alloc &) {
GELOGE(MEMALLOC_FAILED, "Make shared failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Make shared failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
} catch (...) {
GELOGE(INTERNAL_ERROR, "Make shared failed since other exception raise");
return INTERNAL_ERROR;
@@ -1094,7 +1097,6 @@ Status ModelManager::LoadModelOffline(uint32_t &model_id, const ModelData &model
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, break, "DavinciInit failed.");

InsertModel(model_id, davinci_model);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(davinci_model == nullptr, ret = PARAM_INVALID; break, "Insert model failed");

GELOGI("Parse model %u success.", model_id);

@@ -1122,7 +1124,7 @@ Status ModelManager::LoadModelWithQ(uint32_t &model_id, const ModelData &model_d
const std::vector<uint32_t> &input_queue_ids,
const std::vector<uint32_t> &output_queue_ids) {
GE_CHK_BOOL_RET_STATUS(model_data.key.empty() || access(model_data.key.c_str(), F_OK) == 0,
GE_EXEC_MODEL_KEY_PATH_INVALID, "input key file path %s is not valid, %s",
ACL_ERROR_GE_EXEC_MODEL_KEY_PATH_INVALID, "input key file path %s is not valid, %s",
model_data.key.c_str(), strerror(errno));

ModelHelper model_helper;
@@ -1134,8 +1136,8 @@ Status ModelManager::LoadModelWithQ(uint32_t &model_id, const ModelData &model_d

shared_ptr<DavinciModel> davinci_model = MakeShared<DavinciModel>(model_data.priority, nullptr);
if (davinci_model == nullptr) {
GELOGE(MEMALLOC_FAILED, "create model failed.");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create model failed.");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

ret = davinci_model->Assign(model_helper.GetGeModel());
@@ -1390,13 +1392,13 @@ Status ModelManager::GetModelMemAndWeightSize(const ModelData &model, size_t &me

auto partition_table = reinterpret_cast<ModelPartitionTable *>(model_data);
if (partition_table->num == 1) {
GELOGE(GE_EXEC_MODEL_PARTITION_NUM_INVALID, "om model is error,please use executable om model");
return GE_EXEC_MODEL_PARTITION_NUM_INVALID;
GELOGE(ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID, "om model is error,please use executable om model");
return ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID;
}
ModelPartition task_partition;
if (om_file_helper.GetModelPartition(ModelPartitionType::TASK_INFO, task_partition) != SUCCESS) {
GELOGE(GE_EXEC_LOAD_TASK_PARTITION_FAILED, "get task model partition failed.");
return GE_EXEC_LOAD_TASK_PARTITION_FAILED;
GELOGE(ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED, "get task model partition failed.");
return ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED;
}

std::shared_ptr<domi::ModelTaskDef> model_task_def = MakeShared<domi::ModelTaskDef>();
@@ -1405,14 +1407,14 @@ Status ModelManager::GetModelMemAndWeightSize(const ModelData &model, size_t &me
}
if (task_partition.size != 0) {
if (!ReadProtoFromArray(task_partition.data, static_cast<int>(task_partition.size), model_task_def.get())) {
GELOGE(GE_EXEC_LOAD_TASK_PARTITION_FAILED, "ReadProtoFromArray failed.");
return GE_EXEC_LOAD_TASK_PARTITION_FAILED;
GELOGE(ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED, "ReadProtoFromArray failed.");
return ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED;
}
}

ModelPartition partition_weight;
ret = om_file_helper.GetModelPartition(ModelPartitionType::WEIGHTS_DATA, partition_weight);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED,
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED,
"Get weight partition failed. ret = %u", ret);

mem_size = model_task_def->memory_size();
@@ -1431,7 +1433,8 @@ void ModelManager::GenModelId(uint32_t *id) {

Status ModelManager::GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID, "GetOrigInputInfo failed, invalid model_id is %u.",
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetOrigInputInfo failed, invalid model_id is %u.",
model_id);

return davinci_model->GetOrigInputInfo(index, orig_input_info);
@@ -1441,7 +1444,7 @@ Status ModelManager::GetAllAippInputOutputDims(uint32_t model_id, uint32_t index
std::vector<InputOutputDims> &input_dims,
std::vector<InputOutputDims> &output_dims) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, PARAM_INVALID,
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetAllAippInputOutputDims failed, invalid model_id is %u.", model_id);

return davinci_model->GetAllAippInputOutputDims(index, input_dims, output_dims);


ge/single_op/single_op.cc (+22, -22)

@@ -51,9 +51,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY SingleOp::~SingleOp() {
Status SingleOp::ValidateArgs(const std::vector<DataBuffer> &inputs, const std::vector<DataBuffer> &outputs) {
auto num_inputs = inputs.size();
if (num_inputs != input_sizes_.size()) {
GELOGE(PARAM_INVALID, "Input num mismatch. model expect %zu, but given %zu", input_addr_list_.size(),
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input num mismatch. model expect %zu, but given %zu", input_addr_list_.size(),
inputs.size());
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}

for (size_t i = 0; i < num_inputs; ++i) {
@@ -62,16 +62,16 @@ Status SingleOp::ValidateArgs(const std::vector<DataBuffer> &inputs, const std::
GELOGI("Input [%zu], aligned_size:%zu, inputs.length:%lu, input_sizes_:%lu",
i, aligned_size, inputs[i].length, input_sizes_[i]);
if (aligned_size < input_sizes_[i]) {
GELOGE(PARAM_INVALID, "Input size mismatch. index = %zu, model expect %zu,"
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input size mismatch. index = %zu, model expect %zu,"
" but given %zu(after align)", i, input_sizes_[i], aligned_size);
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}
}

auto num_outputs = outputs.size();
if (num_outputs != output_sizes_.size()) {
GELOGE(PARAM_INVALID, "output num mismatch. model expect %zu, but given %zu", output_sizes_.size(), outputs.size());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "output num mismatch. model expect %zu, but given %zu", output_sizes_.size(), outputs.size());
return ACL_ERROR_GE_PARAM_INVALID;
}

for (size_t i = 0; i < num_outputs; ++i) {
@@ -80,9 +80,9 @@ Status SingleOp::ValidateArgs(const std::vector<DataBuffer> &inputs, const std::
GELOGI("Output [%zu], aligned_size:%zu, outputs.length:%lu, output_sizes_:%lu",
i, aligned_size, outputs[i].length, output_sizes_[i]);
if (aligned_size < output_sizes_[i]) {
GELOGE(PARAM_INVALID, "Output size mismatch. index = %zu, model expect %zu,"
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Output size mismatch. index = %zu, model expect %zu,"
"but given %zu(after align)", i, output_sizes_[i], aligned_size);
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}
}

@@ -134,8 +134,8 @@ Status SingleOp::UpdateArgs(const std::vector<DataBuffer> &inputs, const std::ve
RT_MEMCPY_HOST_TO_DEVICE_EX,
stream_);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMemcpyAsync addresses failed, ret = %d", rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "rtMemcpyAsync addresses failed, ret = %d", rt_ret);
return rt_ret;
}
} else if (task->GetOpTaskType() == OP_TASK_AICPUCC) {
GELOGD("Update aicpu_CC task args");
@@ -198,29 +198,29 @@ Status DynamicSingleOp::ValidateParams(const vector<GeTensorDesc> &input_desc,
std::vector<GeTensorDesc> &output_desc,
std::vector<DataBuffer> &outputs) const {
if (inputs.size() != input_desc.size()) {
GELOGE(PARAM_INVALID,
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"Input number mismatches input desc number. Input num = %zu, input desc num = %zu",
inputs.size(),
input_desc.size());
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}

if (outputs.size() != output_desc.size()) {
GELOGE(PARAM_INVALID,
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"Output number mismatches output desc number. Output num = %zu, output desc num = %zu",
outputs.size(),
output_desc.size());
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}

if (input_desc.size() != num_inputs_) {
GELOGE(PARAM_INVALID, "Input number mismatches. expect %zu, but given %zu", num_inputs_, input_desc.size());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input number mismatches. expect %zu, but given %zu", num_inputs_, input_desc.size());
return ACL_ERROR_GE_PARAM_INVALID;
}

if (output_desc.size() != num_outputs_) {
GELOGE(PARAM_INVALID, "Output number mismatches. expect %zu, but given %zu", num_outputs_, output_desc.size());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Output number mismatches. expect %zu, but given %zu", num_outputs_, output_desc.size());
return ACL_ERROR_GE_PARAM_INVALID;
}

return SUCCESS;
@@ -247,8 +247,8 @@ Status DynamicSingleOp::AllocateWorkspaces(const std::vector<int64_t> &workspace
GE_CHECK_NOTNULL(stream_resource);
auto ws_base = stream_resource->MallocMemory(kPurpose, static_cast<size_t>(total_size));
if (ws_base == nullptr) {
GELOGE(MEMALLOC_FAILED, "Failed to allocate memory of size: %ld", total_size);
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to allocate memory of size: %ld", total_size);
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
GELOGD("Done allocating workspace memory successfully.");

@@ -293,10 +293,10 @@ Status DynamicSingleOp::ExecuteAsync(const vector<GeTensorDesc> &input_desc,
} else if (op_task_->GetOpTaskType() == OP_TASK_AICPU || op_task_->GetOpTaskType() == OP_TASK_AICPUCC) {
return op_task_->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_);
} else {
GELOGE(UNSUPPORTED,
GELOGE(ACL_ERROR_GE_OP_TASK_TYPE_INVALID,
"Only TBE_Task, AI_CPU_Task and AI_CPUCC_Task are supported, but got %u",
op_task_->GetOpTaskType());
return UNSUPPORTED;
return ACL_ERROR_GE_OP_TASK_TYPE_INVALID;
}
}
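
The substitutions above recur across all of the single-op files: validation and allocation failures now return the external ACL_ERROR_GE_* codes instead of the generic PARAM_INVALID / MEMALLOC_FAILED / UNSUPPORTED statuses, and failed runtime calls propagate the rt return value instead of collapsing it to RT_FAILED. A minimal sketch of both patterns follows; the wrapper names ValidateCount and CopyArgsToDevice and their parameters are illustrative and not taken from the sources, while GELOGE, Status, rtMemcpyAsync and the error constants are the ones already used in this diff.

// Sketch only: assumes the GE logging macro GELOGE, the Status alias (uint32_t),
// and the ACL_ERROR_GE_* constants from inc/external/ge/ge_error_codes.h.
Status ValidateCount(size_t expected, size_t given) {
  if (given != expected) {
    GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input num mismatch. model expect %zu, but given %zu", expected, given);
    return ACL_ERROR_GE_PARAM_INVALID;  // external code, visible to ACL callers
  }
  return SUCCESS;
}

Status CopyArgsToDevice(void *dst, size_t dst_size, const void *src, size_t src_size, rtStream_t stream) {
  rtError_t rt_ret = rtMemcpyAsync(dst, dst_size, src, src_size, RT_MEMCPY_HOST_TO_DEVICE_EX, stream);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(rt_ret, "rtMemcpyAsync addresses failed, ret = %d", rt_ret);
    return rt_ret;  // propagate the runtime error code unchanged
  }
  return SUCCESS;
}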



+ 8
- 8
ge/single_op/single_op_manager.cc View File

@@ -33,16 +33,16 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status SingleOpManager::GetOpFr
SingleOp **single_op) {
GELOGI("GetOpFromModel in. model name = %s", model_name.c_str());
if (single_op == nullptr) {
GELOGE(PARAM_INVALID, "single op is null");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "single op is null");
return ACL_ERROR_GE_INTERNAL_ERROR;
}

uintptr_t resource_id = 0;
GE_CHK_STATUS_RET(GetResourceId(stream, resource_id));
StreamResource *res = GetResource(resource_id, stream);
if (res == nullptr) {
GELOGE(MEMALLOC_FAILED, "GetResource failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "GetResource failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

SingleOp *op = res->GetOperator(model_data.model_data);
@@ -109,8 +109,8 @@ Status SingleOpManager::GetDynamicOpFromModel(const string &model_name,
GE_CHK_STATUS_RET(GetResourceId(stream, resource_id));
StreamResource *res = GetResource(resource_id, stream);
if (res == nullptr) {
GELOGE(MEMALLOC_FAILED, "GetResource failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "GetResource failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

DynamicSingleOp *op = res->GetDynamicOperator(model_data.model_data);
@@ -140,8 +140,8 @@ Status SingleOpManager::GetResourceId(rtStream_t stream, uintptr_t &resource_id)
rtContext_t rt_cur_ctx = nullptr;
auto rt_err = rtCtxGetCurrent(&rt_cur_ctx);
if (rt_err != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "get current context failed, runtime result is %d", static_cast<int>(rt_err));
return RT_FAILED;
GELOGE(rt_err, "get current context failed, runtime result is %d", static_cast<int>(rt_err));
return rt_err;
}
// use current context as resource key instead
GELOGI("use context as resource key instead when default stream");


+ 29
- 27
ge/single_op/single_op_model.cc View File

@@ -94,7 +94,7 @@ Status SingleOpModel::InitModelMem(StreamResource &res) {
GELOGI("total memory: %lu, zero_copy_mem: %lu", model_params_.memory_size, model_params_.zero_copy_mem_size);
model_params_.mem_base = res.MallocMemory(purpose, model_params_.memory_size - model_params_.zero_copy_mem_size);
if (model_params_.mem_base == nullptr) {
return RT_FAILED;
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
}

@@ -103,7 +103,7 @@ Status SingleOpModel::InitModelMem(StreamResource &res) {
model_params_.weight_base = res.MallocWeight(purpose, model_params_.weight_size);
if (model_params_.weight_base == nullptr) {
// no need to free memory, for that was handled by StreamResources
return RT_FAILED;
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

auto weight_buffer = model_helper_.GetGeModel()->GetWeight();
@@ -121,8 +121,9 @@ Status SingleOpModel::InitModelMem(StreamResource &res) {
Status SingleOpModel::ParseInputNode(const OpDescPtr &op_desc) {
vector<int64_t> offsets = op_desc->GetOutputOffset();
if (offsets.size() != kDataOutputNum) {
GELOGE(PARAM_INVALID, "Data op should have only one output, but got %zu", op_desc->GetOutputOffset().size());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"Data op should have only one output, but got %zu", op_desc->GetOutputOffset().size());
return ACL_ERROR_GE_PARAM_INVALID;
}

auto output_desc = op_desc->GetOutputDescPtr(0);
@@ -158,8 +159,8 @@ Status SingleOpModel::LoadAllNodes() {
Graph graph = ge_model->GetGraph();
auto compute_graph = GraphUtils::GetComputeGraph(graph);
if (compute_graph == nullptr) {
GELOGE(PARAM_INVALID, "[%s] compute_graph is null", model_name_.c_str());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[%s] compute_graph is null", model_name_.c_str());
return ACL_ERROR_GE_INTERNAL_ERROR;
}

auto nodes = compute_graph->GetDirectNode();
@@ -257,8 +258,8 @@ Status SingleOpModel::BuildTaskList(SingleOp &single_op) {
}
single_op.tasks_.emplace_back(task);
} else {
GELOGE(UNSUPPORTED, "Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
return UNSUPPORTED;
GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID, "Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
return ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID;
}
} else if (task_type == RT_MODEL_TASK_KERNEL_EX) {
GELOGD("Building AICPU_TF task");
@@ -282,7 +283,7 @@ Status SingleOpModel::BuildTaskList(SingleOp &single_op) {

void SingleOpModel::ParseArgTable(TbeOpTask *task, SingleOp &op) {
if (task == nullptr) {
GELOGE(PARAM_INVALID, "tbe op task is nullptr");
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "tbe op task is nullptr");
return;
}
// args: addr1, addr2, addr3 ...
@@ -305,14 +306,14 @@ Status SingleOpModel::BuildKernelTask(const domi::KernelDef &kernel_def, TbeOpTa
const auto &context = kernel_def.context();
auto iter = op_list_.find(context.op_index());
if (iter == op_list_.end()) {
GELOGE(INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
return ACL_ERROR_GE_INTERNAL_ERROR;
}

auto *tbe_task = new (std::nothrow) TbeOpTask();
if (tbe_task == nullptr) {
GELOGE(MEMALLOC_FAILED, "create tbe op task failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create tbe op task failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

auto builder = TbeTaskBuilder(model_name_, iter->second, kernel_def);
@@ -331,14 +332,14 @@ Status SingleOpModel::BuildKernelExTask(const domi::KernelExDef &kernel_def, AiC
bool dynamic_flag, bool& depend_compute_flag, uint64_t session_id) {
auto iter = op_list_.find(kernel_def.op_index());
if (iter == op_list_.end()) {
GELOGE(INTERNAL_ERROR, "op desc not found. op index = %u", kernel_def.op_index());
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "op desc not found. op index = %u", kernel_def.op_index());
return ACL_ERROR_GE_INTERNAL_ERROR;
}

std::unique_ptr<AiCpuTask> aicpu_task(new (std::nothrow) AiCpuTask());
if (aicpu_task == nullptr) {
GELOGE(MEMALLOC_FAILED, "create aicpu_TF op task failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create aicpu_TF op task failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}
auto builder = AiCpuTaskBuilder(iter->second->GetOpDesc(), kernel_def);
auto ret = builder.BuildTask(*aicpu_task, model_params_, dynamic_flag, session_id);
@@ -356,13 +357,13 @@ Status SingleOpModel::BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTa
const auto &context = kernel_def.context();
auto iter = op_list_.find(context.op_index());
if (iter == op_list_.end()) {
GELOGE(INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
return ACL_ERROR_GE_INTERNAL_ERROR;
}
std::unique_ptr<AiCpuCCTask> aicpucc_task(new (std::nothrow) AiCpuCCTask());
if (aicpucc_task == nullptr) {
GELOGE(MEMALLOC_FAILED, "create aicpu_CC op task failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create aicpu_CC op task failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

auto builder = AiCpuCCTaskBuilder(iter->second->GetOpDesc(), kernel_def);
@@ -398,8 +399,9 @@ Status SingleOpModel::BuildModelTaskKernel(const TaskDef &task_def, DynamicSingl
GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task));
single_op.op_task_.reset(task);
} else {
GELOGE(UNSUPPORTED, "Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
return UNSUPPORTED;
GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID,
"Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
return ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID;
}
return SUCCESS;
}
@@ -422,8 +424,8 @@ Status SingleOpModel::BuildTaskListForDynamicOp(DynamicSingleOp &single_op) {
GE_CHK_STATUS_RET_NOLOG(BuildModelTaskKernel(task_def, single_op));
} else if (task_type == RT_MODEL_TASK_KERNEL_EX) {
if (single_op.op_task_ != nullptr) {
GELOGE(UNSUPPORTED, "Do not support dynamic op with multiple tasks.");
return UNSUPPORTED;
GELOGE(ACL_ERROR_GE_OP_TASK_TYPE_INVALID, "Do not support dynamic op with multiple tasks.");
return ACL_ERROR_GE_OP_TASK_TYPE_INVALID;
}
GELOGD("Building AICPU_TF task");
AiCpuTask *aicpu_task = nullptr;
@@ -434,8 +436,8 @@ Status SingleOpModel::BuildTaskListForDynamicOp(DynamicSingleOp &single_op) {
depend_compute_flag, dynamic_singleop_sessionid));
if (depend_compute_flag) {
if (i >= tasks.size() - 1) {
GELOGE(FAILED, "The copy task of the fourth operator was not found.");
return FAILED;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "The copy task of the fourth operator was not found.");
return ACL_ERROR_GE_PARAM_INVALID;
}
++i;
const TaskDef &copy_task_def = tasks[i];


+ 2
- 2
ge/single_op/stream_resource.cc View File

@@ -160,8 +160,8 @@ Status StreamResource::BuildOperator(const string &model_name, const ModelData &

auto new_op = std::unique_ptr<SingleOp>(new(std::nothrow) SingleOp(&stream_mu_, stream_));
if (new_op == nullptr) {
GELOGE(MEMALLOC_FAILED, "new SingleOp failed");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "new SingleOp failed");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

GELOGI("To build operator: %s", model_name.c_str());


+ 6
- 6
ge/single_op/task/aicpu_kernel_task_builder.cc View File

@@ -25,20 +25,20 @@ AiCpuCCTaskBuilder::AiCpuCCTaskBuilder(const OpDescPtr &op_desc, const domi::Ker
Status AiCpuCCTaskBuilder::SetKernelArgs(AiCpuCCTask &task) {
size_t aicpu_arg_size = kernel_def_.args_size();
if (aicpu_arg_size <= 0) {
GELOGE(RT_FAILED, "aicpu_arg_size is invalid, value = %zu", aicpu_arg_size);
return RT_FAILED;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "aicpu_arg_size is invalid, value = %zu", aicpu_arg_size);
return ACL_ERROR_GE_PARAM_INVALID;
}
std::unique_ptr<uint8_t[]> aicpu_args;
aicpu_args.reset(new(std::nothrow) uint8_t[aicpu_arg_size]());
if (aicpu_args == nullptr) {
GELOGE(RT_FAILED, "malloc failed, size = %zu", aicpu_arg_size);
return RT_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "malloc failed, size = %zu", aicpu_arg_size);
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

auto err = memcpy_s(aicpu_args.get(), aicpu_arg_size, kernel_def_.args().data(), aicpu_arg_size);
if (err != EOK) {
GELOGE(RT_FAILED, "memcpy_s args failed, size = %zu, err = %d", aicpu_arg_size, err);
return RT_FAILED;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "memcpy_s args failed, size = %zu, err = %d", aicpu_arg_size, err);
return ACL_ERROR_GE_INTERNAL_ERROR;
}

task.SetIoAddr(aicpu_args.get() + sizeof(aicpu::AicpuParamHead));


+ 16
- 16
ge/single_op/task/aicpu_task_builder.cc View File

@@ -30,8 +30,8 @@ namespace ge {
size_t arg_size = kernel_def_.args_size();
auto rt_ret = rtMalloc(io_addr, arg_size, RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMalloc failed, size = %zu, ret = %d", arg_size, rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "rtMalloc failed, size = %zu, ret = %d", arg_size, rt_ret);
return rt_ret;
}

const void *src_addr = reinterpret_cast<const void *>(addresses.data());
@@ -39,8 +39,8 @@ namespace ge {
rt_ret = rtMemcpy(*io_addr, arg_size, src_addr, src_len, RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
(void)rtFree(*io_addr);
GELOGE(RT_FAILED, "rtMemcpy addresses failed, ret = %d", rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "rtMemcpy addresses failed, ret = %d", rt_ret);
return rt_ret;
}

return SUCCESS;
@@ -50,8 +50,8 @@ namespace ge {
auto sec_ret = memcpy_s(&fwk_op_kernel, sizeof(STR_FWK_OP_KERNEL),
kernel_def_.args().data(), kernel_def_.args().size());
if (sec_ret != EOK) {
GELOGE(FAILED, "memcpy failed, ret: %d", sec_ret);
return FAILED;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "memcpy failed, ret: %d", sec_ret);
return ACL_ERROR_GE_INTERNAL_ERROR;
}

auto io_addr_val = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(io_addr));
@@ -65,16 +65,16 @@ namespace ge {
void *fwk_op_args = nullptr;
auto rt_ret = rtMalloc(&fwk_op_args, sizeof(STR_FWK_OP_KERNEL), RT_MEMORY_HBM);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "malloc arg memory failed, ret = %d", rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "malloc arg memory failed, ret = %d", rt_ret);
return rt_ret;
}

rt_ret = rtMemcpy(fwk_op_args, sizeof(STR_FWK_OP_KERNEL), &fwk_op_kernel,
sizeof(STR_FWK_OP_KERNEL), RT_MEMCPY_HOST_TO_DEVICE);
if (rt_ret != RT_ERROR_NONE) {
(void)rtFree(fwk_op_args);
GELOGE(RT_FAILED, "copy args failed, ret = %d", rt_ret);
return RT_FAILED;
GELOGE(rt_ret, "copy args failed, ret = %d", rt_ret);
return rt_ret;
}
*args = fwk_op_args;
return SUCCESS;
@@ -83,9 +83,9 @@ namespace ge {
Status AiCpuTaskBuilder::InitWorkspaceAndIO(void **io_addr, void **kernel_workspace,
const SingleOpModelParam &param, bool dynamic_flag) {
if (kernel_def_.args_size() > sizeof(STR_FWK_OP_KERNEL)) {
GELOGE(PARAM_INVALID, "sizeof STR_FWK_OP_KERNEL is: %lu, but args_size is: %d",
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "sizeof STR_FWK_OP_KERNEL is: %lu, but args_size is: %d",
sizeof(STR_FWK_OP_KERNEL), kernel_def_.args_size());
return PARAM_INVALID;
return ACL_ERROR_GE_PARAM_INVALID;
}
auto addresses = BuildTaskUtils::GetAddresses(op_desc_, param);
auto ws_addr_vec = addresses.at(BuildTaskUtils::kAddressIndexWorkspace);
@@ -94,8 +94,8 @@ namespace ge {
GE_CHK_RT_RET(rtMalloc(kernel_workspace, kernel_def_.task_info_size(), RT_MEMORY_HBM));
} else {
if (ws_addr_vec.empty()) {
GELOGE(PARAM_INVALID, "workspace Data Address is empty.");
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "workspace Data Address is empty.");
return ACL_ERROR_GE_PARAM_INVALID;
}
*kernel_workspace = ws_addr_vec[0];
}
@@ -143,8 +143,8 @@ namespace ge {
GELOGI("Begin to CreateAicpuSession, session id: %lu", session_id);
GE_CHECK_NOTNULL(ModelManager::GetInstance());
GE_IF_BOOL_EXEC(ModelManager::GetInstance()->CreateAicpuSession(session_id) != SUCCESS,
GELOGE(FAILED, "CreateAicpuSession error. session id: %lu", session_id);
return FAILED;)
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "CreateAicpuSession error. session id: %lu", session_id);
return ACL_ERROR_GE_INTERNAL_ERROR;)
ret = SetKernelArgs(&task.args_, fwk_op_kernel);
if (ret != SUCCESS) {
return ret;


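The device-memory setup in AiCpuTaskBuilder keeps one shape throughout: rtMalloc the argument buffer, rtMemcpy the host data in, rtFree the buffer if the copy fails, and return the runtime code directly from either failure branch. A condensed sketch of that shape, under the hypothetical wrapper name CopyToDeviceBuffer (the rt* calls and macros are the ones used above):

// Sketch only; not a function from the sources.
Status CopyToDeviceBuffer(const void *host_data, size_t size, void **dev_addr) {
  rtError_t rt_ret = rtMalloc(dev_addr, size, RT_MEMORY_HBM);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(rt_ret, "rtMalloc failed, size = %zu, ret = %d", size, rt_ret);
    return rt_ret;
  }
  rt_ret = rtMemcpy(*dev_addr, size, host_data, size, RT_MEMCPY_HOST_TO_DEVICE);
  if (rt_ret != RT_ERROR_NONE) {
    (void)rtFree(*dev_addr);  // release the buffer allocated above before reporting the failure
    GELOGE(rt_ret, "rtMemcpy failed, ret = %d", rt_ret);
    return rt_ret;
  }
  return SUCCESS;
}
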
+ 2
- 2
ge/single_op/task/op_task.cc View File

@@ -693,8 +693,8 @@ Status AiCpuCCTask::LaunchKernel(rtStream_t stream) {
block_dim_, args_.get(), static_cast<uint32_t>(arg_size_),
sm_desc, stream, dump_flag_);
if (ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "Invoke rtCpuKernelLaunch failed. ret = %d", ret);
return RT_FAILED;
GELOGE(ret, "Invoke rtCpuKernelLaunch failed. ret = %d", ret);
return ret;
}
GELOGD("Invoke rtCpuKernelLaunch succeeded");



+ 24
- 24
ge/single_op/task/tbe_task_builder.cc View File

@@ -91,9 +91,9 @@ Status TbeTaskBuilder::DoRegisterBinary(const OpKernelBin &kernel_bin, void **bi
binary.magic = param.core_type == 0 ? RT_DEV_BINARY_MAGIC_ELF : RT_DEV_BINARY_MAGIC_ELF_AIVEC;
auto ret = rtDevBinaryRegister(&binary, bin_handle);
if (ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtDevBinaryRegister failed, bin key = %s, core_type = %ld, rt ret = %d", stub_name_.c_str(),
GELOGE(ret, "rtDevBinaryRegister failed, bin key = %s, core_type = %ld, rt ret = %d", stub_name_.c_str(),
param.core_type, static_cast<int>(ret));
return RT_FAILED;
return ret;
}

return SUCCESS;
@@ -106,9 +106,9 @@ Status TbeTaskBuilder::DoRegisterMeta(void *bin_handle) {
if (!meta_data.empty()) {
auto rt_ret = rtMetadataRegister(bin_handle, meta_data.c_str());
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMetadataRegister failed. bin key = %s, meta_data = %s, rt ret = %d", stub_name_.c_str(),
GELOGE(rt_ret, "rtMetadataRegister failed. bin key = %s, meta_data = %s, rt ret = %d", stub_name_.c_str(),
meta_data.c_str(), static_cast<int>(rt_ret));
return RT_FAILED;
return rt_ret;
}
}

@@ -118,9 +118,9 @@ Status TbeTaskBuilder::DoRegisterMeta(void *bin_handle) {
Status TbeTaskBuilder::DoRegisterFunction(void *bin_handle, const char *stub_name, const char *kernel_name) {
auto rt_ret = rtFunctionRegister(bin_handle, stub_name, stub_name, kernel_name, FUNC_MODE_NORMAL);
if (rt_ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtFunctionRegister failed. bin key = %s, kernel name = %s, rt ret = %d", stub_name, kernel_name,
GELOGE(rt_ret, "rtFunctionRegister failed. bin key = %s, kernel name = %s, rt ret = %d", stub_name, kernel_name,
static_cast<int>(rt_ret));
return RT_FAILED;
return rt_ret;
}

return SUCCESS;
@@ -173,14 +173,14 @@ Status TbeTaskBuilder::RegisterKernel(TbeOpTask &task, const SingleOpModelParam

auto tbe_kernel = GetTbeKernel(op_desc_);
if (tbe_kernel == nullptr) {
GELOGE(PARAM_INVALID, "OP EXT ATTR NAME TBE_KERNEL not found. op = %s", op_desc_->GetName().c_str());
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "OP EXT ATTR NAME TBE_KERNEL not found. op = %s", op_desc_->GetName().c_str());
return ACL_ERROR_GE_INTERNAL_ERROR;
}

auto holder = std::unique_ptr<KernelHolder>(new (std::nothrow) KernelHolder(stub_func, tbe_kernel));
if (holder == nullptr) {
GELOGE(MEMALLOC_FAILED, "create KernelHodler failed.");
return MEMALLOC_FAILED;
GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "create KernelHodler failed.");
return ACL_ERROR_GE_MEMORY_ALLOCATION;
}

void *bin_handle = nullptr;
@@ -189,8 +189,8 @@ Status TbeTaskBuilder::RegisterKernel(TbeOpTask &task, const SingleOpModelParam
holder->SetBinHandle(bin_handle);
if (!registry.AddKernel(stub_name_, std::move(holder))) {
// should not happen. only one thread can reach here
GELOGE(INTERNAL_ERROR, "Add kernel failed. stub name = %s", stub_name_.c_str());
return INTERNAL_ERROR;
GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "Add kernel failed. stub name = %s", stub_name_.c_str());
return ACL_ERROR_GE_INTERNAL_ERROR;
}
}
}
@@ -218,15 +218,15 @@ Status TbeTaskBuilder::GetSmDesc(void **sm_desc, const SingleOpModelParam &param

auto rtRet = rtMemAllocManaged(sm_desc, sm_desc_str.size(), RT_MEMORY_SPM);
if (rtRet != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMemAllocManaged failed, ret: %d", static_cast<int>(rtRet));
return RT_FAILED;
GELOGE(rtRet, "rtMemAllocManaged failed, ret: %d", static_cast<int>(rtRet));
return rtRet;
}

rtRet = rtMemcpy(*sm_desc, sm_desc_str.size(), sm_desc_str.data(), sm_desc_str.size(), RT_MEMCPY_HOST_TO_DEVICE);
if (rtRet != RT_ERROR_NONE) {
(void)rtMemFreeManaged(*sm_desc);
GELOGE(RT_FAILED, "rtMemcpy, ret: %d", static_cast<int>(rtRet));
return RT_FAILED;
GELOGE(rtRet, "rtMemcpy, ret: %d", static_cast<int>(rtRet));
return rtRet;
}
}

@@ -240,8 +240,8 @@ Status TbeTaskBuilder::SetKernelArgs(TbeOpTask &task, const SingleOpModelParam &

auto rtRet = rtMemcpy(args.get(), arg_size, kernel_def_.args().data(), arg_size, RT_MEMCPY_HOST_TO_HOST);
if (rtRet != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMemcpy args failed, size = %zu, ret = %d", arg_size, static_cast<int>(rtRet));
return RT_FAILED;
GELOGE(rtRet, "rtMemcpy args failed, size = %zu, ret = %d", arg_size, static_cast<int>(rtRet));
return rtRet;
}

const domi::KernelContext &context = kernel_def_.context();
@@ -259,8 +259,8 @@ Status TbeTaskBuilder::SetKernelArgs(TbeOpTask &task, const SingleOpModelParam &
uint64_t src_len = sizeof(void *) * tensor_device_addr_vec.size();
rtRet = rtMemcpy(args.get() + offset, arg_size - offset, src_addr, src_len, RT_MEMCPY_HOST_TO_HOST);
if (rtRet != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "rtMemcpy addresses failed, ret = %d", static_cast<int>(rtRet));
return RT_FAILED;
GELOGE(rtRet, "rtMemcpy addresses failed, ret = %d", static_cast<int>(rtRet));
return rtRet;
}
}

@@ -285,8 +285,8 @@ Status TbeTaskBuilder::BuildTask(TbeOpTask &task, const SingleOpModelParam &para
void *stub_func = nullptr;
auto rtRet = rtGetFunctionByName(stub_name_.c_str(), &stub_func);
if (rtRet != SUCCESS) {
GELOGE(RT_FAILED, "rtGetFunctionByName failed.");
return RT_FAILED;
GELOGE(rtRet, "rtGetFunctionByName failed.");
return rtRet;
}

task.SetStubFunc(stub_name_, stub_func);
@@ -299,8 +299,8 @@ Status TbeTaskBuilder::InitTilingInfo(TbeOpTask &task) {
(void)AttrUtils::GetInt(op_desc_, kAttrOpParamSize, max_size);
GELOGD("Got op param size by key: %s, ret = %ld", kAttrOpParamSize, max_size);
if (max_size <= 0) {
GELOGE(PARAM_INVALID, "[%s] Invalid op_param_size: %ld.", op_desc_->GetName().c_str(), max_size);
return PARAM_INVALID;
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[%s] Invalid op_param_size: %ld.", op_desc_->GetName().c_str(), max_size);
return ACL_ERROR_GE_PARAM_INVALID;
}

void *tiling_buffer = nullptr;


+ 37
- 0
inc/external/ge/ge_api_error_codes.h View File

@@ -19,6 +19,7 @@

#include <map>
#include <string>
#include "ge_error_codes.h"

namespace ge {
class StatusFactory {
@@ -66,11 +67,47 @@ class ErrorNoRegisterar {
((0xFF & (static_cast<uint8_t>(modid))) << 12) | (0x0FFF & (static_cast<uint16_t>(value))); \
const ErrorNoRegisterar g_##name##_errorno(name, desc);

#define GE_ERRORNO_EXTERNAL(name, desc) const ErrorNoRegisterar g_##name##_errorno(name, desc);

using Status = uint32_t;

// General error code
GE_ERRORNO(0, 0, 0, 0, 0, SUCCESS, 0, "success");
GE_ERRORNO(0b11, 0b11, 0b111, 0xFF, 0b11111, FAILED, 0xFFF, "failed"); /*lint !e401*/

GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_PARAM_INVALID, "Parameter invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_NOT_INIT, "GE executor not initialized yet.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "Model file path invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID, "Model id invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_KEY_PATH_INVALID, "Model key path invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION, "Model does not support encryption.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "Data size of model invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID, "Model addr invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Queue id of model invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED, "The model loaded repeatedly.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID, "Model partition num invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic input addr invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, "Dynamic input size invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, "Dynamic batch size invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_AIPP_BATCH_EMPTY, "AIPP batch parameter empty.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_AIPP_NOT_EXIST, "AIPP parameter not exist.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_AIPP_MODE_INVALID, "AIPP mode invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_OP_TASK_TYPE_INVALID, "Task type invalid.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID, "Kernel type invalid.");

GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_MEMORY_ALLOCATION, "Memory allocation error.");

GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_INTERNAL_ERROR, "Internal error.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_LOAD_MODEL, "Load model error.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED, "Failed to load model partition.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED, "Failed to load weight partition.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED, "Failed to load task partition.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED, "Failed to load op kernel partition.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA, "Failed to release the model data.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_COMMAND_HANDLE, "Command handle error.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_GET_TENSOR_INFO, "Get tensor info error.");
GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_UNLOAD_MODEL, "Load model error.");

} // namespace ge

#endif // INC_EXTERNAL_GE_GE_API_ERROR_CODES_H_
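
This header change only adds the GE_ERRORNO_EXTERNAL macro and the list of registrations; the StatusFactory and ErrorNoRegisterar types it expands to are declared earlier in the same file and are not shown in this hunk. The stand-alone sketch below approximates the mechanism so the macro's effect is visible in isolation; the Demo* classes are simplified stand-ins, not the real GE types.

// Simplified stand-ins for StatusFactory / ErrorNoRegisterar, illustration only.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

using Status = uint32_t;

class DemoStatusFactory {
 public:
  static DemoStatusFactory &Instance() {
    static DemoStatusFactory factory;
    return factory;
  }
  void RegisterErrorNo(Status code, const std::string &desc) { err_desc_[code] = desc; }
  std::string GetErrDesc(Status code) const {
    auto it = err_desc_.find(code);
    return it == err_desc_.end() ? std::string() : it->second;
  }

 private:
  std::map<Status, std::string> err_desc_;
};

class DemoErrorNoRegisterar {
 public:
  DemoErrorNoRegisterar(Status code, const std::string &desc) {
    DemoStatusFactory::Instance().RegisterErrorNo(code, desc);
  }
};

// Equivalent in spirit to GE_ERRORNO_EXTERNAL(ACL_ERROR_GE_PARAM_INVALID, "Parameter invalid."):
// a static registrar object records the description for the code at program load time.
static const uint32_t kDemoParamInvalid = 145000U;
static const DemoErrorNoRegisterar g_demo_param_invalid(kDemoParamInvalid, "Parameter invalid.");

int main() {
  std::cout << DemoStatusFactory::Instance().GetErrDesc(kDemoParamInvalid) << std::endl;  // prints "Parameter invalid."
  return 0;
}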

+ 58
- 0
inc/external/ge/ge_error_codes.h View File

@@ -0,0 +1,58 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef INC_EXTERNAL_GE_GE_ERROR_CODES_H_
#define INC_EXTERNAL_GE_GE_ERROR_CODES_H_

#include <stddef.h>

#ifdef __cplusplus
extern "C" {
#endif
static const uint32_t ACL_ERROR_GE_PARAM_INVALID = 145000;
static const uint32_t ACL_ERROR_GE_EXEC_NOT_INIT = 145001;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID = 145002;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ID_INVALID = 145003;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_KEY_PATH_INVALID = 145004;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_NOT_SUPPORT_ENCRYPTION = 145005;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID = 145006;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID = 145007;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID = 145008;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED = 145009;
static const uint32_t ACL_ERROR_GE_EXEC_MODEL_PARTITION_NUM_INVALID = 145010;
static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID = 145011;
static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID = 145012;
static const uint32_t ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID = 145013;
static const uint32_t ACL_ERROR_GE_AIPP_BATCH_EMPTY = 145014;
static const uint32_t ACL_ERROR_GE_AIPP_NOT_EXIST = 145015;
static const uint32_t ACL_ERROR_GE_AIPP_MODE_INVALID = 145016;
static const uint32_t ACL_ERROR_GE_OP_TASK_TYPE_INVALID = 145017;
static const uint32_t ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID = 145018;
static const uint32_t ACL_ERROR_GE_MEMORY_ALLOCATION = 245000;
static const uint32_t ACL_ERROR_GE_INTERNAL_ERROR = 545000;
static const uint32_t ACL_ERROR_GE_LOAD_MODEL = 545001;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED = 545002;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED = 545003;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED = 545004;
static const uint32_t ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED = 545005;
static const uint32_t ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA = 545006;
static const uint32_t ACL_ERROR_GE_COMMAND_HANDLE = 545007;
static const uint32_t ACL_ERROR_GE_GET_TENSOR_INFO = 545008;
static const uint32_t ACL_ERROR_GE_UNLOAD_MODEL = 545009;
#ifdef __cplusplus
} // namespace ge
#endif
#endif // INC_EXTERNAL_GE_GE_ERROR_CODES_H_
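
Because the new public header is C-compatible (extern "C" guard, plain uint32_t constants), an ACL-level caller can branch on the returned value directly; the numbering also groups the codes into rough families: 145xxx for invalid parameters and executor inputs, 245000 for memory allocation, and 545xxx for internal failures. A minimal usage sketch follows, assuming the header is on the include path; the RunModel wrapper is hypothetical and only the constants come from ge_error_codes.h.

// Usage sketch only; RunModel stands in for a real GE executor call chain.
#include <cstdint>
#include <cstdio>
#include "ge_error_codes.h"

static uint32_t RunModel() {
  // Pretend the executor rejected one of its arguments.
  return ACL_ERROR_GE_PARAM_INVALID;
}

int main() {
  uint32_t ret = RunModel();
  if (ret == ACL_ERROR_GE_PARAM_INVALID) {
    std::fprintf(stderr, "executor rejected a parameter (code %u)\n", ret);
  } else if (ret == ACL_ERROR_GE_MEMORY_ALLOCATION) {
    std::fprintf(stderr, "memory allocation failed (code %u)\n", ret);
  } else if (ret != 0) {
    std::fprintf(stderr, "other GE error (code %u)\n", ret);
  }
  return ret == 0 ? 0 : 1;
}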

+ 1
- 1
metadef

@@ -1 +1 @@
Subproject commit be949d5ff32baec332aa8765d2b211334ae84dbf
Subproject commit ba04e25e878af2ac5f9a697806daee0768ae3bad

+ 1
- 1
parser

@@ -1 +1 @@
Subproject commit d865fa6e67c00c536e6df2f86d4912c1f1feff4c
Subproject commit 308e3587ec54fdd32ed7113d64a1335208701f59
