@@ -139,7 +139,8 @@ int MemoryDumper::OpenFile(const char *filename) {
  GE_IF_BOOL_EXEC(
  -1 != path_split_pos, string prefix_path = std::string(filename).substr(0, path_split_pos);
  string last_path = std::string(filename).substr(path_split_pos, strlen(filename) - 1);
- GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(prefix_path.length() >= MMPA_MAX_PATH, return kInvalidFd, "Prefix path is too long!");
+ GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(prefix_path.length() >= MMPA_MAX_PATH, return kInvalidFd,
+                                "Prefix path is too long!");
  GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(mmRealPath(prefix_path.c_str(), tmp_path, MMPA_MAX_PATH) != EN_OK, return kInvalidFd,
                                 "Dir %s does not exit.", prefix_path.c_str());
  real_path = std::string(tmp_path) + last_path;)
@@ -138,7 +138,8 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, const uint
  context_.partition_datas_.push_back(partition);
  if (partition.size > model_data_size || mem_offset > model_data_size - partition.size) {
-   GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "The partition size %zu is greater than the model data size %u.",
+   GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID,
+          "The partition size %zu is greater than the model data size %u.",
           partition.size + mem_offset, model_data_size);
    return ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID;
  }
@@ -350,7 +350,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInt64MulOverflow(int6
  FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string RealPath(const char *path) {
  GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(path == nullptr, return "", "path pointer is NULL.");
  GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(strlen(path) >= MMPA_MAX_PATH,
- ErrorManager::GetInstance().ATCReportErrMessage("E19002", {"filepath", "size"}, {path, std::to_string(MMPA_MAX_PATH)});
+ ErrorManager::GetInstance().ATCReportErrMessage("E19002", {"filepath", "size"},
+                                                 {path, std::to_string(MMPA_MAX_PATH)});
  return "", "Path[%s] len is too long, it must be less than %d", path, MMPA_MAX_PATH);
  // Nullptr is returned when the path does not exist or there is no permission
@@ -1267,7 +1267,8 @@ Status BlockMemAssigner::AssignOutputMemoryWithReuse(const NodePtr &node, vector
  bool no_need_assign_memory = ((size == 0) || CheckIsZeroMemNodeType(node->GetType()));
  if (!no_need_assign_memory) {
    out_node_set_continuous_input =
-     IsOutNodeSetContinuousInput(node, i, peer_name, peer_input_index, no_need_assign_memory, reset_zero_copy_flag);
+     IsOutNodeSetContinuousInput(node, i, peer_name, peer_input_index,
+                                 no_need_assign_memory, reset_zero_copy_flag);
    GE_IF_BOOL_EXEC(!no_need_assign_memory,
                    no_need_assign_memory = IsAtomicOutputMemory(node, i, is_atomic, out_node_set_continuous_input););
  }
@@ -1349,7 +1350,8 @@ void BlockMemAssigner::AssignMemoryWithReuse(vector<int64_t> &ranges) {
  bool workspace_skip_flag = false;
  if (has_tvm_workspace_mem_type_attr && tvm_workspace_memory_type[i] == RT_MEMORY_L1) {
    GELOGI(
-     "fusion: node[%s]workspace index[%zu] is not hbm type, add to zero_memory_list, workspace memory type [%ld]",
+     "fusion: node[%s]workspace index[%zu] is not hbm type, add to zero_memory_list, "
+     "workspace memory type [%ld]",
      node_op_desc->GetName().c_str(), i, tvm_workspace_memory_type[i]);
    workspace_skip_flag = true;
  }
@@ -419,7 +419,8 @@ Status GraphMemoryAssigner::AssignContinuousInputMemory(const ge::NodePtr &node,
  GE_IF_BOOL_EXEC(is_peer_output_continuous && (peer_output_size != 1),
    std::string error = "Current op" + FmtToStr(node->GetOpDesc()->GetName()) +
    " requires continuous input, while the previous op" + FmtToStr(peer_op_desc->GetName()) +
-   " requires continuous output. There may be conflict between the two. This node is not supported now.";
+   " requires continuous output. There may be conflict between the two." +
+   "This node is not supported now.";
    GE_ERRORLOG_AND_ERRORMSG(FAILED, error.c_str());
    return PARAM_INVALID;);
@@ -429,7 +430,8 @@ Status GraphMemoryAssigner::AssignContinuousInputMemory(const ge::NodePtr &node,
  GE_IF_BOOL_EXEC(is_peer_reference,
    std::string error = "Current op" + FmtToStr(node->GetOpDesc()->GetName()) +
    " requires continuous input, while the previous op" + FmtToStr(peer_op_desc->GetName()) +
-   " requires continuous output. There may be conflict between the two. This node is not supported now.";
+   " requires continuous output. There may be conflict between the two. " +
+   "This node is not supported now.";
    GE_ERRORLOG_AND_ERRORMSG(FAILED, error.c_str());
    return PARAM_INVALID;);
@@ -917,7 +919,7 @@ Status GraphMemoryAssigner::ReAssignAtomicMemory(bool is_loop_graph) {
  auto mem_iter = memory_offset_.find(RT_MEMORY_HBM);
  if (mem_iter == memory_offset_.end()) {
    std::string error = "Memory offset does not have memory type" + FmtToStr(RT_MEMORY_HBM);
-   GE_ERRORLOG_AND_ERRORMSG(FAILED, error.c_str());
+   GE_ERRORLOG_AND_ERRORMSG(FAILED, error.c_str());
    return FAILED;
  }
@@ -1544,7 +1544,8 @@ Status DavinciModel::LoadWithQueue() {
  }
  if (output_queue_ids_.size() != new_output_data_info_.size()) {
-   GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Output queue ids not match model: output_queue=%zu output_data=%zu",
+   GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID,
+          "Output queue ids not match model: output_queue=%zu output_data=%zu",
           output_queue_ids_.size(), new_output_data_info_.size());
    return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
  }
@@ -1164,7 +1164,8 @@ Status KernelTaskInfo::CceUpdateKernelArgs(const domi::KernelContext &context, u
  cce::ccStatus_t cc_ret;
  std::string update_kernel_args = "ccUpdateKernelArgs";
  auto cceUpdateKernelArgs = (cce::ccStatus_t(*)(cce::ccOpContext &, uint64_t, uint64_t, uint64_t, void *, uint64_t,
-                                                void *))mmDlsym(handle, const_cast<char *>(update_kernel_args.c_str()));
+                                                void *))mmDlsym(handle,
+                                                                const_cast<char *>(update_kernel_args.c_str()));
  if (cceUpdateKernelArgs == nullptr) {
    GELOGE(FAILED, "Failed to invoke function ccUpdateKernelArgs");
    if (mmDlclose(handle) != 0) {
@@ -110,7 +110,8 @@ Status SuperKernelFactory::FuseKernels(const std::vector<void *> &stub_func_list
  GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMalloc failed. error: 0x%X", rt_ret);
                  return RT_ERROR_TO_GE_STATUS(rt_ret);)
  rt_ret =
-   rtMemcpy((void *)hbm_nav_table_addr, nav_table_size, (void *)nav_table.get(), nav_table_size, RT_MEMCPY_HOST_TO_DEVICE);
+   rtMemcpy((void *)hbm_nav_table_addr, nav_table_size, (void *)nav_table.get(),
+            nav_table_size, RT_MEMCPY_HOST_TO_DEVICE);
  GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy failed. error: 0x%X", rt_ret);
                  GE_CHK_RT(rtFree(hbm_nav_table_addr)); return RT_ERROR_TO_GE_STATUS(rt_ret);)
  // Create the necessary metadata for the super kernel
@@ -32,7 +32,8 @@ Debug::~Debug() = default;
  void Debug::DumpProto(const Message &proto, const char *file) {
  std::string file_path = RealPath(file);
- int fd = mmOpen2(file_path.c_str(), M_WRONLY | M_CREAT | O_TRUNC, M_IRUSR | M_IWUSR | M_UMASK_GRPREAD | M_UMASK_OTHREAD);
+ int fd = mmOpen2(file_path.c_str(),
+                  M_WRONLY | M_CREAT | O_TRUNC, M_IRUSR | M_IWUSR | M_UMASK_GRPREAD | M_UMASK_OTHREAD);
  if (fd == -1) {
    GELOGW("Write %s failed", file_path.c_str());
    return;
@@ -150,7 +150,8 @@ Status SubgraphPass::SubgraphOutputNode(const ComputeGraphPtr &graph, const Node
  std::string op_type;
  bool insert_flag = NodeUtils::GetConstOpType(in_node, op_type) ||
                     IsAtomicRequired(in_node, peer_out_anchor->GetIdx()) || IsOutputContinuesRequired(in_node) ||
-                    ((in_node->GetType() == DATA) && (kWhileOpTypes.count(graph->GetParentNode()->GetType()) == 0)) ||
+                    ((in_node->GetType() == DATA) &&
+                     (kWhileOpTypes.count(graph->GetParentNode()->GetType()) == 0)) ||
                     (!graph->GetGraphUnknownFlag() && NodeUtils::IsDynamicShape(node) &&
                      (kWhileOpTypes.count(in_node->GetType()) != 0));
  if (insert_flag) {
@@ -1621,7 +1621,8 @@ Status GraphPrepare::CheckUserInput(const std::vector<GeTensor> &user_input) {
  for (size_t i = 0; i < desc.GetShape().GetDimNum(); ++i) {
    if (desc.GetShape().GetDim(i) < 0) {
-     std::string situation = "data dim[" + std::to_string(i) + "][" + std::to_string(desc.GetShape().GetDim(i)) + "]" ;
+     std::string situation = "data dim[" + std::to_string(i) + "][" +
+                             std::to_string(desc.GetShape().GetDim(i)) + "]" ;
      std::string reason = "it need >= 0";
      ErrorManager::GetInstance().ATCReportErrMessage("E19025", {"situation", "reason"}, {situation, reason});
      GELOGE(GE_GRAPH_INIT_FAILED, "data dim %zu is not supported, need >= 0, real:%ld.", i,
@@ -57,7 +57,8 @@ struct GraphExecutionContext {
  do { \
    if ((context != nullptr) && (context)->profiler != nullptr) { \
      if (node_name != nullptr) { \
-       context->profiler->RecordEvent(evt_type, "tid:%lu [%s] [%s] " fmt, GeLog::GetTid(), node_name, category, ##__VA_ARGS__);\
+       context->profiler->RecordEvent(evt_type, "tid:%lu [%s] [%s] " fmt, \
+                                      GeLog::GetTid(), node_name, category, ##__VA_ARGS__);\
      } else { \
        context->profiler->RecordEvent(evt_type, "tid:%lu [%s] " fmt, GeLog::GetTid(), category, ##__VA_ARGS__); \
      }\
@@ -62,7 +62,8 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) {
  {
    std::lock_guard<std::mutex> lk(mu_);
    RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] Start");
-   GE_CHK_STATUS_RET(ShapeRefiner::InferShapeAndTypeForRunning(node_item.node, true), "Invoke InferShapeAndType failed.");
+   GE_CHK_STATUS_RET(ShapeRefiner::InferShapeAndTypeForRunning(node_item.node, true),
+                     "Invoke InferShapeAndType failed.");
    RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] End");
  }
  // Check again to make sure shape is valid after shape inference
@@ -175,7 +175,8 @@ Status OpsKernelManager::ParsePluginOptions(const map<string, string> &options,
  } else if (flag == 1) {
    enable_flag = true;
  } else {
-   GELOGE(GE_GRAPH_OPTIONS_INVALID, "option_key:%s, its value %s is invalid, it must be 0 or 1.", plugin_name.c_str(),
+   GELOGE(GE_GRAPH_OPTIONS_INVALID, "option_key:%s, its value %s is invalid, it must be 0 or 1.",
+          plugin_name.c_str(),
           iter->second.c_str());
    return GE_GRAPH_OPTIONS_INVALID;
  }
@@ -188,7 +189,8 @@ Status OpsKernelManager::ParsePluginOptions(const map<string, string> &options,
           iter->second.c_str());
    return GE_GRAPH_OPTIONS_INVALID;
  } catch (...) {
-   GELOGE(GE_GRAPH_OPTIONS_INVALID, "option_key:%s, its value %s is invalid, it must be 0 or 1.", plugin_name.c_str(),
+   GELOGE(GE_GRAPH_OPTIONS_INVALID, "option_key:%s, its value %s is invalid, it must be 0 or 1.",
+          plugin_name.c_str(),
           iter->second.c_str());
    return GE_GRAPH_OPTIONS_INVALID;
  }
@@ -641,7 +641,8 @@ Status ParseOutNodes(const string &out_nodes) {
  if (!domi::GetContext().user_out_nodes_top_vec.empty()) {
    ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"},
                                                    {"--out_nodes", out_nodes, "is not all index or top_name"});
-   GELOGE(PARAM_INVALID, "This out_nodes str must be all index or top_name, while the actual input is %s", out_nodes.c_str());
+   GELOGE(PARAM_INVALID, "This out_nodes str must be all index or top_name, while the actual input is %s",
+          out_nodes.c_str());
    return PARAM_INVALID;
  }
  // stoi: The method may throw an exception: invalid_argument/out_of_range
@@ -68,7 +68,8 @@ Status SingleOp::ValidateArgs(const std::vector<DataBuffer> &inputs, const std::
  auto num_outputs = outputs.size();
  if (num_outputs != output_sizes_.size()) {
-   GELOGE(ACL_ERROR_GE_PARAM_INVALID, "output num mismatch. model expect %zu, but given %zu", output_sizes_.size(), outputs.size());
+   GELOGE(ACL_ERROR_GE_PARAM_INVALID, "output num mismatch. model expect %zu, but given %zu",
+          output_sizes_.size(), outputs.size());
    return ACL_ERROR_GE_PARAM_INVALID;
  }
@@ -206,12 +207,14 @@ Status DynamicSingleOp::ValidateParams(const vector<GeTensorDesc> &input_desc,
  }
  if (input_desc.size() != num_inputs_) {
-   GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input number mismatches. expect %zu, but given %zu", num_inputs_, input_desc.size());
+   GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Input number mismatches. expect %zu, but given %zu", num_inputs_,
+          input_desc.size());
    return ACL_ERROR_GE_PARAM_INVALID;
  }
  if (output_desc.size() != num_outputs_) {
-   GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Output number mismatches. expect %zu, but given %zu", num_outputs_, output_desc.size());
+   GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Output number mismatches. expect %zu, but given %zu", num_outputs_,
+          output_desc.size());
    return ACL_ERROR_GE_PARAM_INVALID;
  }
@@ -260,7 +260,8 @@ Status SingleOpModel::BuildTaskList(SingleOp &single_op) {
      }
      single_op.tasks_.emplace_back(task);
    } else {
-     GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID, "Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
+     GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID,
+            "Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", context.kernel_type());
      return ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID;
    }
  } else if (task_type == RT_MODEL_TASK_KERNEL_EX) {
@@ -173,7 +173,8 @@ Status TbeTaskBuilder::RegisterKernel(TbeOpTask &task, const SingleOpModelParam
  auto tbe_kernel = GetTbeKernel(op_desc_);
  if (tbe_kernel == nullptr) {
-   GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "OP EXT ATTR NAME TBE_KERNEL not found. op = %s", op_desc_->GetName().c_str());
+   GELOGE(ACL_ERROR_GE_INTERNAL_ERROR,
+          "OP EXT ATTR NAME TBE_KERNEL not found. op = %s", op_desc_->GetName().c_str());
    return ACL_ERROR_GE_INTERNAL_ERROR;
  }
| @@ -38,17 +38,17 @@ extern "C" { | |||||
| enum TraceStatus { TRACE_INIT = 0, TRACE_RUNNING, TRACE_WAITING, TRACE_STOP }; | enum TraceStatus { TRACE_INIT = 0, TRACE_RUNNING, TRACE_WAITING, TRACE_STOP }; | ||||
| class GeLog { | class GeLog { | ||||
| public: | |||||
| public: | |||||
| #ifdef __GNUC__ | #ifdef __GNUC__ | ||||
| static pid_t GetTid() { | |||||
| thread_local static pid_t tid = syscall(__NR_gettid); | |||||
| return tid; | |||||
| } | |||||
| static pid_t GetTid() { | |||||
| thread_local static pid_t tid = syscall(__NR_gettid); | |||||
| return tid; | |||||
| } | |||||
| #else | #else | ||||
| static int GetTid() { | |||||
| thread_local static int tid = static_cast<int>(GetCurrentThreadId()); | |||||
| return tid; | |||||
| } | |||||
| static int GetTid() { | |||||
| thread_local static int tid = static_cast<int>(GetCurrentThreadId()); | |||||
| return tid; | |||||
| } | |||||
| #endif | #endif | ||||
| }; | }; | ||||
@@ -61,30 +61,33 @@ inline bool IsLogEnable(int module_name, int log_level) {
    return false;
  }
- #define GELOGE(ERROR_CODE, fmt, ...) \
+ #define GELOGE(ERROR_CODE, fmt, ...) \
    dlog_error(GE_MODULE_NAME, "%lu %s: ErrorNo: %d(%s) " fmt, GeLog::GetTid(), __FUNCTION__, ERROR_CODE, \
               ((GE_GET_ERRORNO_STR(ERROR_CODE)).c_str()), ##__VA_ARGS__)
- #define GELOGW(fmt, ...) \
-   if (IsLogEnable(GE_MODULE_NAME, DLOG_WARN)) dlog_warn(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
- #define GELOGI(fmt, ...) \
-   if (IsLogEnable(GE_MODULE_NAME, DLOG_INFO)) dlog_info(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
- #define GELOGD(fmt, ...) \
-   if (IsLogEnable(GE_MODULE_NAME, DLOG_DEBUG)) dlog_debug(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
+ #define GELOGW(fmt, ...) \
+   if (IsLogEnable(GE_MODULE_NAME, DLOG_WARN)) \
+     dlog_warn(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
+ #define GELOGI(fmt, ...) \
+   if (IsLogEnable(GE_MODULE_NAME, DLOG_INFO)) \
+     dlog_info(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
+ #define GELOGD(fmt, ...) \
+   if (IsLogEnable(GE_MODULE_NAME, DLOG_DEBUG)) \
+     dlog_debug(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
  #define GEEVENT(fmt, ...) dlog_event(GE_MODULE_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
- #define GELOGO(fmt, ...) \
-   Dlog(GE_MODULE_NAME, DLOG_OPLOG, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
- #define GELOGT(VALUE, fmt, ...) \
-   do { \
-     TraceStatus stat = VALUE; \
-     const char *const TraceStatStr[] = {"INIT", "RUNNING", "WAITING", "STOP"}; \
-     int idx = static_cast<int>(stat); \
-     char *k = const_cast<char *>("status"); \
-     char *v = const_cast<char *>(TraceStatStr[idx]); \
-     KeyValue kv = {k, v}; \
-     DlogWithKV(static_cast<int>(GE_MODULE_NAME), DLOG_TRACE, &kv, 1, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__); \
+ #define GELOGO(fmt, ...) Dlog(GE_MODULE_NAME, DLOG_OPLOG, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
+ #define GELOGT(VALUE, fmt, ...) \
+   do { \
+     TraceStatus stat = VALUE; \
+     const char *const TraceStatStr[] = {"INIT", "RUNNING", "WAITING", "STOP"}; \
+     int idx = static_cast<int>(stat); \
+     char *k = const_cast<char *>("status"); \
+     char *v = const_cast<char *>(TraceStatStr[idx]); \
+     KeyValue kv = {k, v}; \
+     DlogWithKV(static_cast<int>(GE_MODULE_NAME), DLOG_TRACE, &kv, 1, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, \
+                ##__VA_ARGS__); \
    } while (0)
- #define GE_LOG_ERROR(MOD_NAME, ERROR_CODE, fmt, ...) \
+ #define GE_LOG_ERROR(MOD_NAME, ERROR_CODE, fmt, ...) \
    dlog_error(MOD_NAME, "%lu %s: ErrorNo: %d(%s) " fmt, GeLog::GetTid(), __FUNCTION__, ERROR_CODE, \
               ((GE_GET_ERRORNO_STR(ERROR_CODE)).c_str()), ##__VA_ARGS__)
  #define GE_LOG_WARN(MOD_NAME, fmt, ...) \
@@ -92,20 +95,23 @@ inline bool IsLogEnable(int module_name, int log_level) {
  #define GE_LOG_INFO(MOD_NAME, fmt, ...) \
    if (IsLogEnable(MOD_NAME, DLOG_INFO)) dlog_info(MOD_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
  #define GE_LOG_DEBUG(MOD_NAME, fmt, ...) \
-   if (IsLogEnable(MOD_NAME, DLOG_DEBUG)) dlog_debug(MOD_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
- #define GE_LOG_EVENT(MOD_NAME, fmt, ...) dlog_event(MOD_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
+   if (IsLogEnable(MOD_NAME, DLOG_DEBUG)) \
+     dlog_debug(MOD_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
+ #define GE_LOG_EVENT(MOD_NAME, fmt, ...) \
+   dlog_event(MOD_NAME, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
  #define GE_LOG_OPLOG(MOD_NAME, fmt, ...) \
    Dlog(MOD_NAME, DLOG_OPLOG, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__)
- #define GE_LOG_TRACE(MOD_NAME, value, fmt, ...) \
-   do { \
-     TraceStatus stat = value; \
-     const char *const TraceStatStr[] = {"INIT", "RUNNING", "WAITING", "STOP"}; \
-     int idx = static_cast<int>(stat); \
-     char *k = const_cast<char *>("status"); \
-     char *v = const_cast<char *>(TraceStatStr[idx]); \
-     KeyValue kv = {k, v}; \
-     DlogWithKV(static_cast<int>(MOD_NAME), DLOG_TRACE, &kv, 1, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, ##__VA_ARGS__); \
+ #define GE_LOG_TRACE(MOD_NAME, value, fmt, ...) \
+   do { \
+     TraceStatus stat = value; \
+     const char *const TraceStatStr[] = {"INIT", "RUNNING", "WAITING", "STOP"}; \
+     int idx = static_cast<int>(stat); \
+     char *k = const_cast<char *>("status"); \
+     char *v = const_cast<char *>(TraceStatStr[idx]); \
+     KeyValue kv = {k, v}; \
+     DlogWithKV(static_cast<int>(MOD_NAME), DLOG_TRACE, &kv, 1, "%lu %s:" fmt, GeLog::GetTid(), __FUNCTION__, \
+                ##__VA_ARGS__); \
    } while (0)
  // print memory when it is greater than 1KB.
@@ -360,7 +360,8 @@ MMPA_FUNC_VISIBILITY INT32 mmDladdr(VOID *addr, mmDlInfo *info);
  MMPA_FUNC_VISIBILITY VOID *mmDlsym(VOID *handle, const CHAR *funcName);
  MMPA_FUNC_VISIBILITY INT32 mmDlclose(VOID *handle);
  MMPA_FUNC_VISIBILITY CHAR *mmDlerror();
- MMPA_FUNC_VISIBILITY INT32 mmCreateAndSetTimer(mmTimer *timerHandle, mmUserBlock_t *timerBlock, UINT milliSecond, UINT period);
+ MMPA_FUNC_VISIBILITY INT32 mmCreateAndSetTimer(mmTimer *timerHandle, mmUserBlock_t *timerBlock,
+                                                UINT milliSecond, UINT period);
  MMPA_FUNC_VISIBILITY INT32 mmDeleteTimer(mmTimer timerHandle);
  MMPA_FUNC_VISIBILITY INT32 mmStatGet(const CHAR *path, mmStat_t *buffer);
  MMPA_FUNC_VISIBILITY INT32 mmStat64Get(const CHAR *path, mmStat64_t *buffer);
@@ -80,10 +80,16 @@ REG_OP(Pooling)
  *x: A tensor of type float16, float32, double . \n
  *@par Attributes:
- *@li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window, where N = C = 1, and H and W are positive integers within the range [1, 255].
- *@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1. The strides of the H and W dimensions are positive integers within the range [1, 63].
- *@li padding: A required string, specifying the padding algorithm, either "VALID" or "SAME". With "SAME" means that the outputs will have the same spatial dimensions as its inputs. With "VALID" means no padding.
- *@li data_format: An optional string, specifying the data format of "ksize" and "strides", either "NCHW", "NC1HWC0", or "NHWC" (default) . \n
+ *@li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window, where N = C = 1,
+ * and H and W are positive integers within the range [1, 255].
+ *@li strides: A required list of 4 ints, specifying the stride of the sliding window.
+ * The strides of the N and C dimensions are 1.
+ * The strides of the H and W dimensions are positive integers within the range [1, 63].
+ *@li padding: A required string, specifying the padding algorithm, either "VALID" or "SAME".
+ * With "SAME" means that the outputs will have the same spatial dimensions as its inputs.
+ * With "VALID" means no padding.
+ *@li data_format: An optional string, specifying the data format of "ksize" and "strides", either "NCHW",
+ * "NC1HWC0", or "NHWC" (default) . \n
  *@par Outputs:
  *y: The average pooled output tensor. Has the same type and format as input "x" . \n
@@ -93,7 +99,8 @@ REG_OP(Pooling)
  *@li Only single input and single output are supported.
  *@li Global pooling is supported.
  *@li "ksize_H" and "ksize_W" are positive integers within the range [1, 255]. ksize_H * ksize_W < 256
- *@li Due to instruction restrictions, the values of "strides_h" and "strides_w" are positive integers within the range [1, 63].
+ *@li Due to instruction restrictions,
+ * the values of "strides_h" and "strides_w" are positive integers within the range [1, 63].
  *@par Third-party framework compatibility
  * Compatible with the TensorFlow operator AvgPool.
  */
@@ -1230,8 +1237,10 @@ REG_OP(MaxPoolGradWithArgmaxV2)
  * Compatible with the TensorFlow operator MaxPool.
  */
  REG_OP(MaxPoolV3)
-   .INPUT(x,TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16, DT_QINT8}))
-   .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16, DT_QINT8}))
+   .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT32, DT_INT64,
+                         DT_UINT8, DT_INT16, DT_INT8, DT_UINT16, DT_QINT8}))
+   .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT32, DT_INT64,
+                          DT_UINT8, DT_INT16, DT_INT8, DT_UINT16, DT_QINT8}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .ATTR(padding_mode, String, "CALCULATED")