@@ -45,6 +45,9 @@ message AippOpParams {
   // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second input, set related_input_rank to 1.
   uint32 related_input_rank = 2;
+  // related_input_name is optional; it is the top name of the Data node where aipp is inserted.
+  string related_input_name = 6;
   // The input_edge_idx parameter is optional, of integer type, and its configured value must be >= 0.
   // Setting it allows different AIPP processing on different outputs of the Data operator; if it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
   // The configured value must be <= the number of output edges of the Data operator.
@@ -45,6 +45,9 @@ message AippOpParams {
   // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second input, set related_input_rank to 1.
   uint32 related_input_rank = 2;
+  // related_input_name is optional; it is the top name of the Data node where aipp is inserted.
+  string related_input_name = 6;
   // The input_edge_idx parameter is optional, of integer type, and its configured value must be >= 0.
   // Setting it allows different AIPP processing on different outputs of the Data operator; if it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
   // The configured value must be <= the number of output edges of the Data operator.
@@ -45,6 +45,9 @@ message AippOpParams {
   // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second input, set related_input_rank to 1.
   uint32 related_input_rank = 2;
+  // related_input_name is optional; it is the top name of the Data node where aipp is inserted.
+  string related_input_name = 6;
   // The input_edge_idx parameter is optional, of integer type, and its configured value must be >= 0.
   // Setting it allows different AIPP processing on different outputs of the Data operator; if it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
   // The configured value must be <= the number of output edges of the Data operator.
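For illustration, with this new field an aipp config can address the target input by its Caffe top name instead of by index. A minimal sketch (the values and the other fields are hypothetical placeholders, not part of this change):

    aipp_op {
        aipp_mode : static
        related_input_name : "data"    # top name of the Data node, used instead of related_input_rank : 0
        input_format : YUV420SP_U8
    }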
@@ -183,6 +183,11 @@ Status AippOp::InsertAippToGraph(ComputeGraphPtr &graph, std::string &aippConfig
   GE_CHECK_NOTNULL(graph);
   NodePtr target_input = nullptr;
   std::vector<std::pair<OutDataAnchorPtr, InDataAnchorPtr>> target_edges;
+  if (this->ConvertRelatedInputNameToRank() != SUCCESS) {
+    GELOGE(FAILED, "AippOp: convert related input name to rank failed.");
+    return FAILED;
+  }
   GE_CHK_STATUS_RET(this->GetTargetPosition(graph, target_input, target_edges), "Get data nodes position failed");
 
   std::map<OutDataAnchorPtr, NodePtr> out_anchors_to_aipp;
@@ -410,6 +415,38 @@ Status AippOp::GetStaticTargetNode(const ComputeGraphPtr &graph, NodePtr &data_n
   return SUCCESS;
 }
 
+Status AippOp::ConvertRelatedInputNameToRank() {
+  GE_CHECK_NOTNULL(aipp_params_);
+  string related_input_name = aipp_params_->related_input_name();
+  if (related_input_name.empty()) {
+    return SUCCESS;
+  }
+
+  std::vector<std::string> data_top_names = domi::GetContext().data_top_names;
+  GELOGI("Convert name to rank start: data size[%zu]", data_top_names.size());
+  uint32_t index = 0;
+  bool convert_flag = false;
+  for (const auto &data_top_name : data_top_names) {
+    if (related_input_name == data_top_name) {
+      aipp_params_->set_related_input_rank(index);
+      convert_flag = true;
+      GELOGI("AippOp: rank: %u, top name: %s.", index, data_top_name.c_str());
+      break;
+    }
+    index++;
+  }
+  if (!convert_flag) {
+    string error_msg = "Top name " + related_input_name + " convert to rank failed. Please"
+                       " ensure the top name in the aipp config is the top name of a data node.";
+    ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+    GELOGE(PARAM_INVALID, "Top name[%s] convert to rank failed.", related_input_name.c_str());
+    return PARAM_INVALID;
+  }
+
+  return SUCCESS;
+}
+
 Status AippOp::GetTargetPosition(ComputeGraphPtr graph, NodePtr &target_input,
                                  std::vector<std::pair<OutDataAnchorPtr, InDataAnchorPtr>> &target_edges) {
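As a sketch of the conversion above (hypothetical values, for illustration only): if the data-layer top names collected during Caffe parsing are {"data", "im_info"}, a config with related_input_name : "im_info" is rewritten to related_input_rank = 1, and a name that matches no data top name is rejected with E10043. A minimal standalone equivalent of the lookup:

    #include <algorithm>
    #include <string>
    #include <vector>

    // Returns the rank (index) of `name` within `data_top_names`, or -1 if it is not a data top name.
    static int NameToRank(const std::vector<std::string> &data_top_names, const std::string &name) {
      auto it = std::find(data_top_names.begin(), data_top_names.end(), name);
      return it == data_top_names.end() ? -1 : static_cast<int>(it - data_top_names.begin());
    }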
@@ -82,6 +82,7 @@ class AippOp : public InsertOpBase {
   Status AddNodeToGraph(const NodePtr &aipp_node, int64_t max_dynamic_aipp_size);
   Status AddAippAttrbutes(const OpDescPtr &op_desc, const std::string &aipp_cfg_path, const uint32_t &index);
   Status AddAttrToAippData(const OpDescPtr &aipp_data_op_desc);
+  Status ConvertRelatedInputNameToRank();
 
   domi::AippOpParams *aipp_params_ = nullptr;
   ge::NodePtr aipp_node_ = nullptr;
@@ -34,6 +34,7 @@
 #include "graph/utils/op_desc_utils.h"
 #include "graph/utils/tensor_utils.h"
 #include "graph/utils/type_utils.h"
+#include "util_insert_aipp_op.h"
 
 using domi::AippOpParams;
@@ -115,22 +116,94 @@ void InsertNewOpUtil::ClearNewOps() {
   }
 }
 
-Status InsertNewOpUtil::CheckPositionNotRepeat() {
+Status InsertNewOpUtil::CheckInputNamePositionNotRepeat() {
+  for (int i = 0; i < insert_op_conf_->aipp_op_size(); i++) {
+    const domi::AippOpParams *item = insert_op_conf_->mutable_aipp_op(i);
+    GE_CHECK_NOTNULL(item);
+    for (int j = i + 1; j < insert_op_conf_->aipp_op_size(); j++) {
+      const domi::AippOpParams *another_item = insert_op_conf_->mutable_aipp_op(j);
+      GE_CHECK_NOTNULL(another_item);
+      if (another_item->related_input_name().empty()) {
+        string error_msg = "Can not set both related_input_name and related_input_rank!"
+                           " Please ensure the param is the same as in the first aipp config (related_input_name).";
+        ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+        GELOGE(PARAM_INVALID,
+               "Can not set both related_input_rank and related_input_name!"
+               " Please ensure the param is the same as in the first aipp config (related_input_name).");
+        return PARAM_INVALID;
+      }
+      if (item->related_input_name() == another_item->related_input_name()) {
+        string error_msg = "Can not insert aipp at the same position! Please ensure the related_input_name"
+                           " param is different in different aipp configs.";
+        ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+        GELOGE(PARAM_INVALID,
+               "Can not insert aipp op at the same position! Please ensure the related_input_name param "
+               "is different in different aipp configs.");
+        return PARAM_INVALID;
+      }
+    }
+  }
+  return SUCCESS;
+}
+
+Status InsertNewOpUtil::CheckInputRankPositionNoRepeat() {
   for (int i = 0; i < insert_op_conf_->aipp_op_size(); i++) {
     const domi::AippOpParams *item = insert_op_conf_->mutable_aipp_op(i);
+    GE_CHECK_NOTNULL(item);
     for (int j = i + 1; j < insert_op_conf_->aipp_op_size(); j++) {
       const domi::AippOpParams *another_item = insert_op_conf_->mutable_aipp_op(j);
-      GE_IF_BOOL_EXEC(item->related_input_rank() == another_item->related_input_rank(),
-                      string errormsg = "Can not insert aipp to the same postion! Please ensure related_input_rank"
-                                        " param is different in different aipp config.";
-                      ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {errormsg});
-                      GELOGE(PARAM_INVALID,
-                             "Can not insert aipp op to the same postion! Please ensure related_input_rank param "
-                             "is different in different aipp config.");
-                      return PARAM_INVALID;);
+      GE_CHECK_NOTNULL(another_item);
+      if (!another_item->related_input_name().empty()) {
+        string error_msg = "Can not set both related_input_rank and related_input_name!"
+                           " Please ensure the param is the same as in the first aipp config (related_input_rank).";
+        ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+        GELOGE(PARAM_INVALID,
+               "Can not set both related_input_rank and related_input_name!"
+               " Please ensure the param is the same as in the first aipp config (related_input_rank).");
+        return PARAM_INVALID;
+      }
+      if (item->related_input_rank() == another_item->related_input_rank()) {
+        string error_msg = "Can not insert aipp at the same position! Please ensure the related_input_rank"
+                           " param is different in different aipp configs.";
+        ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
+        GELOGE(PARAM_INVALID,
+               "Can not insert aipp op at the same position! Please ensure the related_input_rank param "
+               "is different in different aipp configs.");
+        return PARAM_INVALID;
+      }
     }
   }
+  return SUCCESS;
+}
+
+Status InsertNewOpUtil::CheckPositionNotRepeat() {
+  GE_CHECK_NOTNULL(insert_op_conf_);
+
+  if (insert_op_conf_->aipp_op_size() <= 1) {
+    GELOGI("Aipp op size[%d] is less than 2, no need to check whether positions repeat.", insert_op_conf_->aipp_op_size());
+    return SUCCESS;
+  }
+
+  const domi::AippOpParams *item = insert_op_conf_->mutable_aipp_op(0);
+  GE_CHECK_NOTNULL(item);
+  string related_input_name = item->related_input_name();
+  Status ret = FAILED;
+  if (related_input_name.empty()) {
+    ret = CheckInputRankPositionNoRepeat();
+  } else {
+    ret = CheckInputNamePositionNotRepeat();
+  }
+  if (ret != SUCCESS) {
+    GELOGE(FAILED, "Checking for repeated aipp positions failed.");
+    return FAILED;
+  }
+
   return SUCCESS;
 }
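To make the refactored check concrete: the first aipp_op in the config now decides whether duplicate positions are detected by name or by rank, so configs that mix the two styles, or that target the same position, are rejected with E10043. A hypothetical config that would now fail (field values are illustrative only):

    aipp_op {
        related_input_name : "data"    # first config uses name-based addressing
    }
    aipp_op {
        related_input_rank : 1         # rejected: later configs must also use related_input_name
    }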
@@ -51,6 +51,10 @@ class InsertNewOpUtil {
   Status GetAippParams(const std::unique_ptr<domi::AippOpParams> &aippParams, const ge::NodePtr &aipp_node);
 
+  Status CheckInputNamePositionNotRepeat();
+
+  Status CheckInputRankPositionNoRepeat();
+
   Status CheckGraph(const ge::ComputeGraphPtr &graph);
 
   InsertNewOpUtil() = default;
@@ -45,6 +45,9 @@ message AippOpParams {
   // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second input, set related_input_rank to 1.
   uint32 related_input_rank = 2;
+  // related_input_name is optional; it is the top name of the Data node where aipp is inserted.
+  string related_input_name = 6;
   // The input_edge_idx parameter is optional, of integer type, and its configured value must be >= 0.
   // Setting it allows different AIPP processing on different outputs of the Data operator; if it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
   // The configured value must be <= the number of output edges of the Data operator.
@@ -45,6 +45,9 @@ message AippOpParams {
   // Specifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP is needed on the second input, set related_input_rank to 1.
   uint32 related_input_rank = 2;
+  // related_input_name is optional; it is the top name of the Data node where aipp is inserted.
+  string related_input_name = 6;
   // The input_edge_idx parameter is optional, of integer type, and its configured value must be >= 0.
   // Setting it allows different AIPP processing on different outputs of the Data operator; if it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
   // The configured value must be <= the number of output edges of the Data operator.
@@ -1038,6 +1038,7 @@ void UpdateOmgCtxWithParserCtx() {
   domi::GetContext().out_top_names = GetParserContext().out_top_names;
   domi::GetContext().user_out_nodes_top_vec = GetParserContext().user_out_nodes_top_vec;
   domi::GetContext().default_out_nodes = GetParserContext().default_out_nodes;
+  domi::GetContext().data_top_names = GetParserContext().data_top_names;
 }
 
 void UpdateParserCtxWithOmgCtx() {
@@ -1054,5 +1055,6 @@ void UpdateParserCtxWithOmgCtx() {
   GetParserContext().input_nodes_format_map = domi::GetContext().input_nodes_format_map;
   GetParserContext().out_top_names = domi::GetContext().out_top_names;
   GetParserContext().user_out_nodes_top_vec = domi::GetContext().user_out_nodes_top_vec;
+  GetParserContext().data_top_names = domi::GetContext().data_top_names;
 }
 }  // namespace ge
@@ -100,6 +100,8 @@ struct OmgContext {
   std::vector<std::string> net_out_nodes;
   // net out nodes top names(only caffe has top)
   std::vector<std::string> out_top_names;
+  // net data nodes top names(only caffe has top)
+  std::vector<std::string> data_top_names;
   // preferential format used by the entire network
   domiTensorFormat_t net_format = DOMI_TENSOR_RESERVED;
   domi::FrameworkType type = domi::FRAMEWORK_RESERVED;
@@ -49,6 +49,8 @@ struct ParserContext {
   std::vector<std::string> user_out_nodes_top_vec;
   // net out nodes (where user_out_nodes or leaf nodes)
   std::vector<std::string> net_out_nodes;
+  // net data nodes top names(only caffe has top)
+  std::vector<std::string> data_top_names;
   // net out nodes top names(only caffe has top)
   std::vector<std::string> out_top_names;
   // Whether to use dynamic batch size or dynamic image size
@@ -57,9 +59,12 @@ struct ParserContext {
   domi::domiTensorFormat_t format = domi::DOMI_TENSOR_ND;
   domi::FrameworkType type = domi::FRAMEWORK_RESERVED;
   RunMode run_mode = ONLY_PRE_CHECK;
-  std::string custom_proto_path;  // save caffe custom proto path, used by caffe parse
-  std::string caffe_proto_path;  // save caffe proto path, used by caffe parse
-  std::string enable_scope_fusion_passes;  // name of the pass that needs to take effect
+  // save caffe custom proto path, used by caffe parse
+  std::string custom_proto_path;
+  // save caffe proto path, used by caffe parse
+  std::string caffe_proto_path;
+  // name of the pass that needs to take effect
+  std::string enable_scope_fusion_passes;
 };
 
 ParserContext &GetParserContext();
@@ -1 +1 @@
-Subproject commit 1cc55bcae09902b3d158993dd57bfbd1d3337066
+Subproject commit 5d06bc7547189f24195b3cedcb0bfc3d787c80a5
@@ -1 +1 @@
-Subproject commit db4e6070bb2cec01cead264a44ceae07e7f3048e
+Subproject commit 5af5c72fba1315f3d52113a5e88dc618d68e7dbc